[ "# все транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME,", "MAX_TIME = 20 # list_all_transact = [] # MAX_TIME =", "t[k]) for k in t.keys() if k in [TIME_CREATED, TERMINATED_TIME]])", "self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm, deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)", "1 Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test", "2. # Прерывает работу устройства на 5 единиц времени. #", "list_all_transact]) print \"tA=%s\" % str(tA[0]) print \"tB=%s\" % str(tB[0]) ###", "pyss.storage import Storage from pyss.advance import Advance from pyss.preempt import", "Release(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) #", "GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm,", "transact[NUM] == 1: transact[LABEL] = TRANSACT_A tA.append(transact) elif transact[NUM] ==", "facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в список", "pyss.table import Table from pyss.handle import Handle from pyss.enter import", "9, 'state': 'deleted'}]) elif t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME],", "модели в момент времени 9. # Обработка транзакта А была", "Advance from pyss.preempt import Preempt from pyss.g_return import GReturn from", "момент времени 7. # Транзакт А выходит из модели в", "выходит из модели в момент времени 9. Обработка транзакта А", "\"\"\"Тест Preempt - Return Формируется транзакт A в момент времени", "self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1, 'state': 'actived'}, {'start': 2, 'state': 'preempted'},", "[] tA = [] tB = [] # F_1 =", "{'start': 2, 'state': 'preempted'}, {'start': 7, 'state': 'actived'}, {'start': 9,", "обработку устройством F_1 в течение 3 единиц времени. Формируется транзакт", "in t.keys() if k in [TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing skipping", "6) print str([\"%s:%s\" % (k, t[k]) for k in t.keys()", "funcCondition=checkTest, move2block=\"to_preempt\") # только первый транзакт Seize(sgm, facilityName=F_1) # test", "import GReturn from pyss.facility import Facility from pyss.seize import Seize", "транзакты в список list_all_transact.append(transact) ### SEGMENT ---------------------------- # формируется одна", "pyss.pyss_const import * class TestPreemptReturn(unittest.TestCase): def setUp(self): pass def tearDown(self):", "# формируется одна заявка в момент времени 1 Generate(sgm, med_value=None,", "# ОКУ facility_1 = Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact):", "ТЕСТЫ ---------------------- for t in list_all_transact: # Формируется транзакт A", "import Handle from pyss.enter import Enter from pyss.leave import Leave", "времени 7. 
# Транзакт А выходит из модели в момент", "# Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\")) #--- # только второй", "{'start': 2, 'state': 'actived'}, {'start': 7, 'state': 'deleted'}]) if __name__", "meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1,", "t[FACILITY])) # Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1) # test Handle(sgm,", "Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в", "условия def checkTest(o): t=m.getCurrentTransact() if t[LABEL] == TRANSACT_B: return False", "времени 1. # Идёт на обработку устройством F_1 в течение", "7, 'state': 'actived'}, {'start': 9, 'state': 'deleted'}]) elif t[LABEL] ==", "handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm, facilityName=F_1) # test Handle(sgm,", "# test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=3,", "# test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda", "А выходит из модели в момент времени 9. Обработка транзакта", "# MAX_TIME = 20 # F_1 = \"F_1\" # ОКУ", "# Обработка транзакта А была прервана с 2 по 7.", "import Advance from pyss.preempt import Preempt from pyss.g_return import GReturn", "sgm = Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 # CONSTS", "import Seize from pyss.release import Release from pyss.transfer import Transfer", "list_all_transact = [] tA = [] tB = [] #", "o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) #", "все транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)", "# Идёт на обработку устройством F_1 в течение 3 единиц", "Return Формируется один транзакт в момент времени 1. Прерывает работу", "# ОКУ Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): # складируем", "'preempted'}, {'start': 7, 'state': 'actived'}, {'start': 9, 'state': 'deleted'}]) elif", "транзакт в момент времени 1. Прерывает работу устройства F_1 на", "7. Транзакт А выходит из модели в момент времени 9.", "транзакт A в момент времени 1. Идёт на обработку устройством", "по 7. \"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\") ### MODEL ---------------------------------- m", "sys.path.append(DIRNAME_MODULE + \"pyss\" + os.sep) from pyss import pyssobject from", "на обработку устройством F_1 в течение 3 единиц времени. Формируется", "# Формируется транзакт B в момент времени 2. # Прерывает", "# def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в список list_all_transact.append(transact)", "def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в список list_all_transact.append(transact) ###", "транзакт A в момент времени 1. # Идёт на обработку", "из модели в момент времени 9. Обработка транзакта А была", "list_all_transact.append(transact) ### SEGMENT ---------------------------- # формируется одна заявка в момент", "SEGMENT ---------------------------- # формируется одна заявка в момент времени 1", "transact[NUM] == 2: transact[LABEL] = TRANSACT_B tB.append(transact) # функция проверки", "А была прервана с 2 по 7. 
print str([\"%s:%s\" %", "pyss.preempt import Preempt from pyss.g_return import GReturn from pyss.facility import", "Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact)", "модели в момент времени 7. Транзакт А выходит из модели", "единиц времени. Выходит из модели в момент времени 6. \"\"\"", "# m[OPTIONS].setAllFalse() MAX_TIME = 20 # CONSTS TRANSACT_A = \"A\"", "modificatorFunc=None, first_tx=1, max_amount=2) # вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel)", "момент времени 2. Прерывает работу устройства на 5 единиц времени.", "Storage from pyss.advance import Advance from pyss.preempt import Preempt from", "GReturn from pyss.facility import Facility from pyss.seize import Seize from", "Transfer from pyss.test import Test from pyss.pyss_const import * class", "= [] tA = [] tB = [] # F_1", "Terminate(sgm, label=\"to_term\", deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ", "# Preempt(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))", "заявка в момент времени 1 Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1, max_amount=2)", "\"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\") ### MODEL ---------------------------------- m = PyssModel()", "момент времени 1 Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1, max_amount=2) # вспомогательные", "t[FACILITY])) # # первый транзакт проходит, второй направляется к метке", "работу устройства на 5 единиц времени. Транзакт B выходит из", "skipping test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест Preempt - Return Формируется один", "= \"F_1\" # ОКУ Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact):", "Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\")) #--- # только второй транзакт", "Seize(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) #", "Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") # только первый транзакт Seize(sgm, facilityName=F_1) #", "транзакт Seize(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))", "sys import os import random import unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))", "---------------------------- # формируется одна заявка в момент времени 1 Generate(sgm,", "ОКУ facility_1 = Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): #", "+ os.sep) from pyss import pyssobject from pyss.pyss_model import PyssModel", "# складируем транзакты в список list_all_transact.append(transact) ### SEGMENT ---------------------------- #", "o, t:self.assertNotIn(F_1, t[FACILITY])) # # первый транзакт проходит, второй направляется", "А выходит из модели в момент времени 9. 
# Обработка", "sgm = Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 # list_all_transact", "med_value=None, modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm, handlerFunc=lambda", "import random import unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE)", "только второй транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\") # test # .addBlock(handle.Handle(handlerFunc=lambda", "Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 # list_all_transact = []", "from pyss.table import Table from pyss.handle import Handle from pyss.enter", "funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в список list_all_transact.append(transact) ### SEGMENT", "k in t.keys() if k in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if", "tA.append(transact) elif transact[NUM] == 2: transact[LABEL] = TRANSACT_B tB.append(transact) #", "test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # # первый транзакт", "Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) #", "= os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\" + os.sep)", "None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm,", "Транзакт B выходит из модели в момент времени 7. Транзакт", "Preempt - Return Формируется один транзакт в момент времени 1.", "def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в список list_all_transact.append(transact) def", "к метке \"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") # только первый транзакт", "str(tB[0]) ### SEGMENT ---------------------------- # формируется одна заявка в момент", "for t in list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\"", "t:self.assertNotIn(F_1, t[FACILITY])) # # первый транзакт проходит, второй направляется к", "t in list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\" %", "функция проверки условия def checkTest(o): t=m.getCurrentTransact() if t[LABEL] == TRANSACT_B:", "\"tA=%s\" % str(tA[0]) print \"tB=%s\" % str(tB[0]) ### SEGMENT ----------------------------", "времени 2. Прерывает работу устройства на 5 единиц времени. Транзакт", "self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2, 'state': 'actived'}, {'start': 7, 'state': 'deleted'}])", "logger.info(\"--- test_preempt_return_002 ----------------------------------\") ### MODEL ---------------------------------- m = PyssModel() sgm", "'state': 'deleted'}]) elif t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7)", "---------------------- for t in list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6) print", "elif transact[NUM] == 2: transact[LABEL] = TRANSACT_B tB.append(transact) # функция", "# test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # # первый", "в момент времени 1. 
# Идёт на обработку устройством F_1", "t:not self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm, deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME,", "t.keys() if k in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL] ==", "единиц времени. Формируется транзакт B в момент времени 2. Прерывает", "Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9))", "in t.keys() if k in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL]", "random import unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE", "from pyss.enter import Enter from pyss.leave import Leave from pyss.storage", "в момент времени 1. Прерывает работу устройства F_1 на 5", "== TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [", "Формируется транзакт B в момент времени 2. # Прерывает работу", "F_1 в течение 3 единиц времени. Формируется транзакт B в", "o, t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm,", "Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o,", "Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm,", "from pyss.test import Test from pyss.pyss_const import * class TestPreemptReturn(unittest.TestCase):", "Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # # первый транзакт проходит,", "logger from pyss.table import Table from pyss.handle import Handle from", "Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))", "единиц времени. # Транзакт B выходит из модели в момент", "t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test", "7. 
# Транзакт А выходит из модели в момент времени", "# test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import sys import", "\"pyss\" + os.sep) from pyss import pyssobject from pyss.pyss_model import", "# @unittest.skip(\"testing skipping test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест Preempt - Return", "os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\" + os.sep) from", "med_value=1, modificatorFunc=None, first_tx=1, max_amount=2) # вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm,", "handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) # все транзакты", "test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm, funcTransfer=lambda o,", "#!/usr/bin/python # -*- coding: utf-8 -*- # test_preempt_return.py # pylint:", "t in list_all_transact: # Формируется транзакт A в момент времени", "from pyss.g_return import GReturn from pyss.facility import Facility from pyss.seize", "Handle from pyss.enter import Enter from pyss.leave import Leave from", "устройством F_1 в течение 3 единиц времени. Формируется транзакт B", "o, t:not self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm, deltaTerminate=0) # ЗАПУСК ----------------------", "import Test from pyss.pyss_const import * class TestPreemptReturn(unittest.TestCase): def setUp(self):", "-*- # test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import sys", "tB = [] # F_1 = \"F_1\" # ОКУ facility_1", "# Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda", "list_all_transact = [] # MAX_TIME = 20 # F_1 =", "Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm, facilityName=F_1) # test", "setTransactLabel(owner, transact): if transact[NUM] == 1: transact[LABEL] = TRANSACT_A tA.append(transact)", "момент времени 7. Транзакт А выходит из модели в момент", "1. Прерывает работу устройства F_1 на 5 единиц времени. Выходит", "max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))", "2, 'state': 'preempted'}, {'start': 7, 'state': 'actived'}, {'start': 9, 'state':", "for k in t.keys() if k in [TIME_CREATED, TERMINATED_TIME]]) #", "disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import sys import os import random import", "одна заявка в момент времени 1 Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1,", "CONSTS TRANSACT_A = \"A\" TRANSACT_B = \"B\" # list_all_transact =", "Return Формируется транзакт A в момент времени 1. Идёт на", "os import random import unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep", "(k, t[k]) for k in t.keys() if k in [TIME_CREATED,", "TRANSACT_B = \"B\" # list_all_transact = [] tA = []", "только первый транзакт Seize(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o,", "в момент времени 6. 
\"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\") ### MODEL", "t.keys() if k in [TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing skipping test_preempt_return_002\")", "B в момент времени 2. # Прерывает работу устройства на", "def checkTest(o): t=m.getCurrentTransact() if t[LABEL] == TRANSACT_B: return False return", "единиц времени. Транзакт B выходит из модели в момент времени", "import Storage from pyss.advance import Advance from pyss.preempt import Preempt", "o, t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\")) #---", "# Транзакт А выходит из модели в момент времени 9.", "import Enter from pyss.leave import Leave from pyss.storage import Storage", "for k in t.keys() if k in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]])", "устройства на 5 единиц времени. Транзакт B выходит из модели", "# Handle(sgm, handlerFunc=printAllTransact) # все транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0) #", "def setTransactLabel(owner, transact): if transact[NUM] == 1: transact[LABEL] = TRANSACT_A", "Test from pyss.pyss_const import * class TestPreemptReturn(unittest.TestCase): def setUp(self): pass", "Terminate(sgm, deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ----------------------", "[TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing skipping test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест Preempt", "handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) #", "pyss import pyssobject from pyss.pyss_model import PyssModel from pyss.segment import", "транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) #", "20 # CONSTS TRANSACT_A = \"A\" TRANSACT_B = \"B\" #", "\"\"\"Тест Preempt - Return Формируется один транзакт в момент времени", "прервана с 2 по 7. \"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\") ###", "tA = [] tB = [] # F_1 = \"F_1\"", "в момент времени 2. Прерывает работу устройства на 5 единиц", "обработку устройством F_1 в течение 3 единиц времени. # Формируется", "2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1, 'state': 'actived'}, {'start':", "str([\"%s:%s\" % (k, t[k]) for k in t.keys() if k", "pyss.pyss_model import PyssModel from pyss.segment import Segment from pyss.generate import", "Идёт на обработку устройством F_1 в течение 3 единиц времени.", "устройства F_1 на 5 единиц времени. 
Выходит из модели в", "handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))", "'deleted'}]) elif t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST],", "первый транзакт Seize(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1,", "-*- coding: utf-8 -*- # test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument,", "def tearDown(self): pass # @unittest.skip(\"testing skipping test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест", "= [] # MAX_TIME = 20 # F_1 = \"F_1\"", "from pyss.storage import Storage from pyss.advance import Advance from pyss.preempt", "t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1) # test", "test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=3, modificatorFunc=None)", "MAX_TIME = 20 # F_1 = \"F_1\" # ОКУ Facility(m,", "'actived'}, {'start': 9, 'state': 'deleted'}]) elif t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED],", "% str(tB[0]) ### SEGMENT ---------------------------- # формируется одна заявка в", "модели в момент времени 6. \"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\") ###", "времени. # Формируется транзакт B в момент времени 2. #", "Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm,", "m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t in list_all_transact: self.assertEqual(t[TIME_CREATED],", "в момент времени 1 Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1, max_amount=2) #", "transact): # складируем транзакты в список list_all_transact.append(transact) def setTransactLabel(owner, transact):", "def test_preempt_return_002(self): \"\"\"Тест Preempt - Return Формируется транзакт A в", "2 по 7. print str([\"%s:%s\" % (k, t[k]) for k", "\"Time=%s\" % str(m.getCurTime()) print \"\\n\".join([str(t) for t in list_all_transact]) print", "k in t.keys() if k in [TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing", "modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2))", "список list_all_transact.append(transact) def setTransactLabel(owner, transact): if transact[NUM] == 1: transact[LABEL]", "from pyss.seize import Seize from pyss.release import Release from pyss.transfer", "t in list_all_transact]) print \"tA=%s\" % str(tA[0]) print \"tB=%s\" %", "Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm,", "[TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME],", "TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2, 'state':", "\"B\" # list_all_transact = [] tA = [] tB =", "meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:not", "времени. Формируется транзакт B в момент времени 2. 
Прерывает работу", "= TRANSACT_A tA.append(transact) elif transact[NUM] == 2: transact[LABEL] = TRANSACT_B", "\"tB=%s\" % str(tB[0]) ### SEGMENT ---------------------------- # формируется одна заявка", "# вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) # test Handle(sgm,", "B выходит из модели в момент времени 7. # Транзакт", "m[OPTIONS].setAllFalse() MAX_TIME = 20 # list_all_transact = [] # MAX_TIME", "self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2, 'state': 'actived'}, {'start': 7,", "@unittest.skip(\"testing skipping test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест Preempt - Return Формируется", "# #!/usr/bin/python # -*- coding: utf-8 -*- # test_preempt_return.py #", "t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda o,", "2, 'state': 'actived'}, {'start': 7, 'state': 'deleted'}]) if __name__ ==", "handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm,", "k in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED],", "= [] # F_1 = \"F_1\" # ОКУ facility_1 =", "handlerFunc=setTransactLabel) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # #", "5 единиц времени. # Транзакт B выходит из модели в", "на 5 единиц времени. Выходит из модели в момент времени", "транзакт B в момент времени 2. Прерывает работу устройства на", "pyss.segment import Segment from pyss.generate import Generate from pyss.terminate import", "<reponame>vpv11110000/pyss<gh_stars>0 # #!/usr/bin/python # -*- coding: utf-8 -*- # test_preempt_return.py", "5 единиц времени. Выходит из модели в момент времени 6.", "unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\"", "работу устройства F_1 на 5 единиц времени. Выходит из модели", "print \"tB=%s\" % str(tB[0]) ### SEGMENT ---------------------------- # формируется одна", "на 5 единиц времени. # Транзакт B выходит из модели", "% (k, t[k]) for k in t.keys() if k in", "from pyss import pyssobject from pyss.pyss_model import PyssModel from pyss.segment", "[] tB = [] # F_1 = \"F_1\" # ОКУ", "in list_all_transact: # Формируется транзакт A в момент времени 1.", "в момент времени 9. # Обработка транзакта А была прервана", "pyss.advance import Advance from pyss.preempt import Preempt from pyss.g_return import", "\"F_1\" # ОКУ facility_1 = Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner,", "модели в момент времени 7. # Транзакт А выходит из", "9. Обработка транзакта А была прервана с 2 по 7.", "прервана с 2 по 7. 
print str([\"%s:%s\" % (k, t[k])", "t[FACILITY])) # Preempt(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1,", "deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for", "facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm,", "20 # F_1 = \"F_1\" # ОКУ Facility(m, facilityName=F_1) #", "elif t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [", "# # первый транзакт проходит, второй направляется к метке \"to_preempt\"", "from pyss.segment import Segment from pyss.generate import Generate from pyss.terminate", "from pyss.release import Release from pyss.transfer import Transfer from pyss.test", "# Формируется транзакт A в момент времени 1. # Идёт", "transact): print \"Time=%s\" % str(m.getCurTime()) print \"\\n\".join([str(t) for t in", "= 20 # F_1 = \"F_1\" # ОКУ Facility(m, facilityName=F_1)", "5 единиц времени. Транзакт B выходит из модели в момент", "t[FACILITY])) # Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm,", "MAX_TIME = 20 # CONSTS TRANSACT_A = \"A\" TRANSACT_B =", "Facility from pyss.seize import Seize from pyss.release import Release from", "+ \"pyss\" + os.sep) from pyss import pyssobject from pyss.pyss_model", "from pyss import logger from pyss.table import Table from pyss.handle", "return False return True def printAllTransact(owner, transact): print \"Time=%s\" %", "o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1) #", "pyss.facility import Facility from pyss.seize import Seize from pyss.release import", "LIFE_TIME_LIST]]) if t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME],", "str(m.getCurTime()) print \"\\n\".join([str(t) for t in list_all_transact]) print \"tA=%s\" %", "PyssModel() sgm = Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 #", "\"F_1\" # ОКУ Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): #", "'state': 'actived'}, {'start': 7, 'state': 'deleted'}]) if __name__ == '__main__':", "Транзакт B выходит из модели в момент времени 7. #", "import logger from pyss.table import Table from pyss.handle import Handle", "pyss.terminate import Terminate from pyss import logger from pyss.table import", "test_preempt_return_002(self): \"\"\"Тест Preempt - Return Формируется транзакт A в момент", "test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=5, modificatorFunc=None)", "self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2, 'state': 'actived'},", "- Return Формируется один транзакт в момент времени 1. Прерывает", "в список list_all_transact.append(transact) ### SEGMENT ---------------------------- # формируется одна заявка", "pyss.test import Test from pyss.pyss_const import * class TestPreemptReturn(unittest.TestCase): def", "# первый транзакт проходит, второй направляется к метке \"to_preempt\" Test(sgm,", "test Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm, deltaTerminate=0)", "момент времени 6. 
\"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\") ### MODEL ----------------------------------", "B выходит из модели в момент времени 7. Транзакт А", "funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\")) #--- # только второй транзакт Preempt(sgm,", "# ТЕСТЫ ---------------------- for t in list_all_transact: # Формируется транзакт", "facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY])) #", "TRANSACT_A = \"A\" TRANSACT_B = \"B\" # list_all_transact = []", "m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t in list_all_transact: #", "test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест Preempt - Return Формируется один транзакт", "7. print str([\"%s:%s\" % (k, t[k]) for k in t.keys()", "test_preempt_return_001(self): \"\"\"Тест Preempt - Return Формируется один транзакт в момент", "k in [TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing skipping test_preempt_return_002\") def test_preempt_return_002(self):", "{'start': 1, 'state': 'actived'}, {'start': 2, 'state': 'preempted'}, {'start': 7,", "транзакт проходит, второй направляется к метке \"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\")", "o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) #", "t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST],", "2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2, 'state': 'actived'}, {'start':", "3 единиц времени. Формируется транзакт B в момент времени 2.", "2. Прерывает работу устройства на 5 единиц времени. Транзакт B", "maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t in list_all_transact: self.assertEqual(t[TIME_CREATED], 1)", "# Terminate(sgm, deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ", "+ os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\" + os.sep) from pyss", "modificatorFunc=None) Release(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY]))", "import Preempt from pyss.g_return import GReturn from pyss.facility import Facility", "[] # MAX_TIME = 20 # F_1 = \"F_1\" #", "setUp(self): pass def tearDown(self): pass # @unittest.skip(\"testing skipping test_preempt_return_001\") def", "logger.info(\"--- test_preempt_return_001 ----------------------------------\") ### MODEL ---------------------------------- m = PyssModel() sgm", "o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda", "транзакта А была прервана с 2 по 7. \"\"\" logger.info(\"---", "была прервана с 2 по 7. 
\"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\")", "1 Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1, max_amount=2) # вспомогательные операции Handle(sgm,", "Enter from pyss.leave import Leave from pyss.storage import Storage from", "if k in [TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing skipping test_preempt_return_002\") def", "pyss import logger from pyss.table import Table from pyss.handle import", "проходит, второй направляется к метке \"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") #", "str(tA[0]) print \"tB=%s\" % str(tB[0]) ### SEGMENT ---------------------------- # формируется", "t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start':", "'state': 'actived'}, {'start': 2, 'state': 'preempted'}, {'start': 7, 'state': 'actived'},", "была прервана с 2 по 7. print str([\"%s:%s\" % (k,", "t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) #", "А была прервана с 2 по 7. \"\"\" logger.info(\"--- test_preempt_return_002", "if t[LABEL] == TRANSACT_B: return False return True def printAllTransact(owner,", "Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm,", "Транзакт А выходит из модели в момент времени 9. #", "o, t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) # все транзакты Terminate(sgm,", "из модели в момент времени 9. # Обработка транзакта А", "o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm,", "pyss.enter import Enter from pyss.leave import Leave from pyss.storage import", "pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import sys import os import random", "t[LABEL] == TRANSACT_B: return False return True def printAllTransact(owner, transact):", "os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\" + os.sep) from pyss import", "= \"A\" TRANSACT_B = \"B\" # list_all_transact = [] tA", "один транзакт в момент времени 1. Прерывает работу устройства F_1", "test_preempt_return_001 ----------------------------------\") ### MODEL ---------------------------------- m = PyssModel() sgm =", "[ {'start': 1, 'state': 'actived'}, {'start': 2, 'state': 'preempted'}, {'start':", "# pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import sys import os import", "# Прерывает работу устройства на 5 единиц времени. # Транзакт", "'actived'}, {'start': 7, 'state': 'deleted'}]) if __name__ == '__main__': unittest.main(module=\"test_preempt_return\")", "handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1)", "Table from pyss.handle import Handle from pyss.enter import Enter from", "Preempt from pyss.g_return import GReturn from pyss.facility import Facility from", "6. \"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\") ### MODEL ---------------------------------- m =", "---------------------------------- m = PyssModel() sgm = Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME", "момент времени 9. 
Обработка транзакта А была прервана с 2", "заявка в момент времени 1 Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1, max_amount=1)", "Формируется транзакт B в момент времени 2. Прерывает работу устройства", "handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda", "o.findBlockByLabel(\"to_term\")) #--- # только второй транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\") #", "# CONSTS TRANSACT_A = \"A\" TRANSACT_B = \"B\" # list_all_transact", "---------------------- for t in list_all_transact: # Формируется транзакт A в", "tB.append(transact) # функция проверки условия def checkTest(o): t=m.getCurrentTransact() if t[LABEL]", "времени. Выходит из модели в момент времени 6. \"\"\" logger.info(\"---", "на обработку устройством F_1 в течение 3 единиц времени. #", "% str(m.getCurTime()) print \"\\n\".join([str(t) for t in list_all_transact]) print \"tA=%s\"", "def test_preempt_return_001(self): \"\"\"Тест Preempt - Return Формируется один транзакт в", "os.sep) from pyss import pyssobject from pyss.pyss_model import PyssModel from", "Выходит из модели в момент времени 6. \"\"\" logger.info(\"--- test_preempt_return_001", "Handle(sgm, handlerFunc=setTransactLabel) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) #", "с 2 по 7. \"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\") ### MODEL", "транзакт B в момент времени 2. # Прерывает работу устройства", "TRANSACT_B tB.append(transact) # функция проверки условия def checkTest(o): t=m.getCurrentTransact() if", "t[FACILITY])) # Terminate(sgm, deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) #", "print str([\"%s:%s\" % (k, t[k]) for k in t.keys() if", "# складируем транзакты в список list_all_transact.append(transact) def setTransactLabel(owner, transact): if", "\"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") # только первый транзакт Seize(sgm, facilityName=F_1)", "= Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 # CONSTS TRANSACT_A", "list_all_transact: # Формируется транзакт A в момент времени 1. #", "устройством F_1 в течение 3 единиц времени. # Формируется транзакт", "TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start':", "вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) # test Handle(sgm, handlerFunc=lambda", "Preempt(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) #", "pyss.generate import Generate from pyss.terminate import Terminate from pyss import", "pyssobject from pyss.pyss_model import PyssModel from pyss.segment import Segment from", "= 20 # list_all_transact = [] # MAX_TIME = 20", "направляется к метке \"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") # только первый", "# формируется одна заявка в момент времени 1 Generate(sgm, med_value=1,", "facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda", "времени. Транзакт B выходит из модели в момент времени 7.", "времени 1. 
Идёт на обработку устройством F_1 в течение 3", "== 2: transact[LABEL] = TRANSACT_B tB.append(transact) # функция проверки условия", "= [] tB = [] # F_1 = \"F_1\" #", "времени 2. # Прерывает работу устройства на 5 единиц времени.", "2)) Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1,", "Прерывает работу устройства на 5 единиц времени. # Транзакт B", "20 # list_all_transact = [] # MAX_TIME = 20 #", "---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t in list_all_transact:", "test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm, facilityName=F_1) #", "t[FACILITY])) # Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\")) #--- # только", "pass def tearDown(self): pass # @unittest.skip(\"testing skipping test_preempt_return_001\") def test_preempt_return_001(self):", "Release from pyss.transfer import Transfer from pyss.test import Test from", "\"A\" TRANSACT_B = \"B\" # list_all_transact = [] tA =", ".addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY]))", "# list_all_transact = [] # MAX_TIME = 20 # F_1", "в момент времени 2. # Прерывает работу устройства на 5", "test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import sys import os", "import Transfer from pyss.test import Test from pyss.pyss_const import *", "funcTransactTo_list_all_transact(owner, transact): # складируем транзакты в список list_all_transact.append(transact) def setTransactLabel(owner,", "транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\") # test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None)))", "Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm, funcTransfer=lambda o, t:", "import unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE +", "move2block=\"to_preempt\") # только первый транзакт Seize(sgm, facilityName=F_1) # test Handle(sgm,", "t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact)", "for t in list_all_transact]) print \"tA=%s\" % str(tA[0]) print \"tB=%s\"", "test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест Preempt - Return Формируется транзакт A", "= PyssModel() sgm = Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20", "in [TIME_CREATED, TERMINATED_TIME]]) # @unittest.skip(\"testing skipping test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест", "# только второй транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\") # test #", "Обработка транзакта А была прервана с 2 по 7. print", "= \"B\" # list_all_transact = [] tA = [] tB", "Обработка транзакта А была прервана с 2 по 7. 
\"\"\"", "import Generate from pyss.terminate import Terminate from pyss import logger", "Generate from pyss.terminate import Terminate from pyss import logger from", "DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\" +", "# ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t", "= TRANSACT_B tB.append(transact) # функция проверки условия def checkTest(o): t=m.getCurrentTransact()", "transact): if transact[NUM] == 1: transact[LABEL] = TRANSACT_A tA.append(transact) elif", "o, t: o.findBlockByLabel(\"to_term\")) #--- # только второй транзакт Preempt(sgm, facilityName=F_1,", "операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) # test Handle(sgm, handlerFunc=lambda o,", "ОКУ Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты", "t[k]) for k in t.keys() if k in [TIME_CREATED, TERMINATED_TIME,", "for t in list_all_transact: # Формируется транзакт A в момент", "from pyss.preempt import Preempt from pyss.g_return import GReturn from pyss.facility", "import os import random import unittest DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) +", "handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm, deltaTerminate=0) # ЗАПУСК", "== TRANSACT_B: return False return True def printAllTransact(owner, transact): print", "def printAllTransact(owner, transact): print \"Time=%s\" % str(m.getCurTime()) print \"\\n\".join([str(t) for", "# Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test", "maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t in list_all_transact: # Формируется", "t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o,", "\"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\") ### MODEL ---------------------------------- m = PyssModel()", "m = PyssModel() sgm = Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME =", "t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) # все транзакты Terminate(sgm, label=\"to_term\",", "[] # F_1 = \"F_1\" # ОКУ facility_1 = Facility(m,", "printAllTransact(owner, transact): print \"Time=%s\" % str(m.getCurTime()) print \"\\n\".join([str(t) for t", "с 2 по 7. print str([\"%s:%s\" % (k, t[k]) for", "# test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm, facilityName=F_1)", "транзакта А была прервана с 2 по 7. print str([\"%s:%s\"", "времени 6. 
\"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\") ### MODEL ---------------------------------- m", "Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1, max_amount=2) # вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)", "складируем транзакты в список list_all_transact.append(transact) def setTransactLabel(owner, transact): if transact[NUM]", "# -*- coding: utf-8 -*- # test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace,", "import Table from pyss.handle import Handle from pyss.enter import Enter", "True def printAllTransact(owner, transact): print \"Time=%s\" % str(m.getCurTime()) print \"\\n\".join([str(t)", "transact[LABEL] = TRANSACT_B tB.append(transact) # функция проверки условия def checkTest(o):", "sys.path.append(DIRNAME_MODULE) sys.path.append(DIRNAME_MODULE + \"pyss\" + os.sep) from pyss import pyssobject", "из модели в момент времени 6. \"\"\" logger.info(\"--- test_preempt_return_001 ----------------------------------\")", "label=\"to_term\", deltaTerminate=0) # ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ----------------------", "transact): # складируем транзакты в список list_all_transact.append(transact) ### SEGMENT ----------------------------", "A в момент времени 1. # Идёт на обработку устройством", "момент времени 1. Идёт на обработку устройством F_1 в течение", "[ {'start': 2, 'state': 'actived'}, {'start': 7, 'state': 'deleted'}]) if", "течение 3 единиц времени. # Формируется транзакт B в момент", "1) self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\" % (k, t[k]) for k", "метке \"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") # только первый транзакт Seize(sgm,", "from pyss.handle import Handle from pyss.enter import Enter from pyss.leave", "in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1)", "import Facility from pyss.seize import Seize from pyss.release import Release", "A в момент времени 1. Идёт на обработку устройством F_1", "течение 3 единиц времени. Формируется транзакт B в момент времени", "список list_all_transact.append(transact) ### SEGMENT ---------------------------- # формируется одна заявка в", "# m[OPTIONS].setAllFalse() MAX_TIME = 20 # list_all_transact = [] #", "t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1)", "на 5 единиц времени. 
Транзакт B выходит из модели в", "from pyss.pyss_const import * class TestPreemptReturn(unittest.TestCase): def setUp(self): pass def", "facility_1 = Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): # складируем", "в момент времени 1 Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm,", "# @unittest.skip(\"testing skipping test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест Preempt - Return", "2: transact[LABEL] = TRANSACT_B tB.append(transact) # функция проверки условия def", "utf-8 -*- # test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals import", "from pyss.generate import Generate from pyss.terminate import Terminate from pyss", "self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1, 'state': 'actived'},", "pyss.g_return import GReturn from pyss.facility import Facility from pyss.seize import", "import Segment from pyss.generate import Generate from pyss.terminate import Terminate", "# .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1,", "# test Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda o,", "работу устройства на 5 единиц времени. # Транзакт B выходит", "2 по 7. \"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\") ### MODEL ----------------------------------", "#--- # только второй транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\") # test", "= 20 # CONSTS TRANSACT_A = \"A\" TRANSACT_B = \"B\"", "9)) Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) #", "F_1 = \"F_1\" # ОКУ facility_1 = Facility(m, facilityName=F_1) #", "Формируется транзакт A в момент времени 1. # Идёт на", "print \"\\n\".join([str(t) for t in list_all_transact]) print \"tA=%s\" % str(tA[0])", "'actived'}, {'start': 2, 'state': 'preempted'}, {'start': 7, 'state': 'actived'}, {'start':", "Handle(sgm, handlerFunc=printAllTransact) # все транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0) # ЗАПУСК", "в момент времени 7. # Транзакт А выходит из модели", "F_1 на 5 единиц времени. Выходит из модели в момент", "class TestPreemptReturn(unittest.TestCase): def setUp(self): pass def tearDown(self): pass # @unittest.skip(\"testing", "def setUp(self): pass def tearDown(self): pass # @unittest.skip(\"testing skipping test_preempt_return_001\")", "Формируется один транзакт в момент времени 1. Прерывает работу устройства", "транзакты в список list_all_transact.append(transact) def setTransactLabel(owner, transact): if transact[NUM] ==", "from pyss.advance import Advance from pyss.preempt import Preempt from pyss.g_return", "TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2)", "test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o,", "устройства на 5 единиц времени. 
# Транзакт B выходит из", "'state': 'actived'}, {'start': 9, 'state': 'deleted'}]) elif t[LABEL] == TRANSACT_B:", "# Транзакт B выходит из модели в момент времени 7.", "времени 1 Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) #", "TERMINATED_TIME]]) # @unittest.skip(\"testing skipping test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест Preempt -", "- Return Формируется транзакт A в момент времени 1. Идёт", "B в момент времени 2. Прерывает работу устройства на 5", "времени 9. # Обработка транзакта А была прервана с 2", "if k in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]]) if t[LABEL] == TRANSACT_A:", "Advance(sgm, meanTime=3, modificatorFunc=None) Release(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o,", "# F_1 = \"F_1\" # ОКУ facility_1 = Facility(m, facilityName=F_1)", "момент времени 1. Прерывает работу устройства F_1 на 5 единиц", "Прерывает работу устройства F_1 на 5 единиц времени. Выходит из", "Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) # все", "@unittest.skip(\"testing skipping test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест Preempt - Return Формируется", "Транзакт А выходит из модели в момент времени 9. Обработка", "'state': 'preempted'}, {'start': 7, 'state': 'actived'}, {'start': 9, 'state': 'deleted'}])", "t: o.findBlockByLabel(\"to_term\")) #--- # только второй транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\")", "= Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner, transact): # складируем транзакты", "Preempt - Return Формируется транзакт A в момент времени 1.", "выходит из модели в момент времени 7. Транзакт А выходит", "handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\"))", "first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1,", "handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5, modificatorFunc=None)", "coding: utf-8 -*- # test_preempt_return.py # pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals", "# test Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm,", "ТЕСТЫ ---------------------- for t in list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6)", "из модели в момент времени 7. Транзакт А выходит из", "handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1)", "времени 1. 
Прерывает работу устройства F_1 на 5 единиц времени.", "# ТЕСТЫ ---------------------- for t in list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME],", "1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1, 'state':", "import PyssModel from pyss.segment import Segment from pyss.generate import Generate", "unused-argument, too-many-locals import sys import os import random import unittest", "одна заявка в момент времени 1 Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1,", "складируем транзакты в список list_all_transact.append(transact) ### SEGMENT ---------------------------- # формируется", "= \"F_1\" # ОКУ facility_1 = Facility(m, facilityName=F_1) # def", "import sys import os import random import unittest DIRNAME_MODULE =", "modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1,", "TRANSACT_B: return False return True def printAllTransact(owner, transact): print \"Time=%s\"", "момент времени 1 Generate(sgm, med_value=None, modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)", "PyssModel from pyss.segment import Segment from pyss.generate import Generate from", "формируется одна заявка в момент времени 1 Generate(sgm, med_value=None, modificatorFunc=None,", "в список list_all_transact.append(transact) def setTransactLabel(owner, transact): if transact[NUM] == 1:", "\"\\n\".join([str(t) for t in list_all_transact]) print \"tA=%s\" % str(tA[0]) print", "Preempt(sgm, facilityName=F_1, label=\"to_preempt\") # test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm,", "времени. # Транзакт B выходит из модели в момент времени", "в момент времени 9. Обработка транзакта А была прервана с", "return True def printAllTransact(owner, transact): print \"Time=%s\" % str(m.getCurTime()) print", "facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm,", "9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1, 'state': 'actived'}, {'start': 2, 'state':", "import Release from pyss.transfer import Transfer from pyss.test import Test", "Leave from pyss.storage import Storage from pyss.advance import Advance from", "pass # @unittest.skip(\"testing skipping test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест Preempt -", "# F_1 = \"F_1\" # ОКУ Facility(m, facilityName=F_1) # def", "1. 
# Идёт на обработку устройством F_1 в течение 3", "self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1,", "facilityName=F_1, label=\"to_preempt\") # test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact)", "from pyss.facility import Facility from pyss.seize import Seize from pyss.release", "handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # # первый транзакт проходит, второй", "too-many-locals import sys import os import random import unittest DIRNAME_MODULE", "meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME],", "1, 'state': 'actived'}, {'start': 2, 'state': 'preempted'}, {'start': 7, 'state':", "test Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME],", "в момент времени 1. Идёт на обработку устройством F_1 в", "первый транзакт проходит, второй направляется к метке \"to_preempt\" Test(sgm, funcCondition=checkTest,", "self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\" % (k, t[k]) for k in", "Формируется транзакт A в момент времени 1. Идёт на обработку", "### MODEL ---------------------------------- m = PyssModel() sgm = Segment(m) #", "t=m.getCurrentTransact() if t[LABEL] == TRANSACT_B: return False return True def", "first_tx=1, max_amount=2) # вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) #", "o, t:self.assertNotIn(F_1, t[FACILITY])) # Preempt(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda", "в течение 3 единиц времени. # Формируется транзакт B в", "в течение 3 единиц времени. Формируется транзакт B в момент", "in list_all_transact]) print \"tA=%s\" % str(tA[0]) print \"tB=%s\" % str(tB[0])", "= Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 # list_all_transact =", "% str(tA[0]) print \"tB=%s\" % str(tB[0]) ### SEGMENT ---------------------------- #", "времени 1 Generate(sgm, med_value=1, modificatorFunc=None, first_tx=1, max_amount=2) # вспомогательные операции", "GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY]))", "второй направляется к метке \"to_preempt\" Test(sgm, funcCondition=checkTest, move2block=\"to_preempt\") # только", "print \"Time=%s\" % str(m.getCurTime()) print \"\\n\".join([str(t) for t in list_all_transact])", "единиц времени. # Формируется транзакт B в момент времени 2.", "skipping test_preempt_return_002\") def test_preempt_return_002(self): \"\"\"Тест Preempt - Return Формируется транзакт", "from pyss.transfer import Transfer from pyss.test import Test from pyss.pyss_const", "ЗАПУСК ---------------------- m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME) # ТЕСТЫ ---------------------- for t in", "label=\"to_preempt\") # test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None))) Handle(sgm, handlerFunc=printAllTransact) Handle(sgm,", "import * class TestPreemptReturn(unittest.TestCase): def setUp(self): pass def tearDown(self): pass", "TestPreemptReturn(unittest.TestCase): def setUp(self): pass def tearDown(self): pass # @unittest.skip(\"testing skipping", "момент времени 2. 
# Прерывает работу устройства на 5 единиц", "проверки условия def checkTest(o): t=m.getCurrentTransact() if t[LABEL] == TRANSACT_B: return", "from pyss.leave import Leave from pyss.storage import Storage from pyss.advance", "checkTest(o): t=m.getCurrentTransact() if t[LABEL] == TRANSACT_B: return False return True", "Terminate from pyss import logger from pyss.table import Table from", "import pyssobject from pyss.pyss_model import PyssModel from pyss.segment import Segment", "modificatorFunc=None, first_tx=1, max_amount=1) Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) # test Handle(sgm, handlerFunc=lambda o,", "7. \"\"\" logger.info(\"--- test_preempt_return_002 ----------------------------------\") ### MODEL ---------------------------------- m =", "if transact[NUM] == 1: transact[LABEL] = TRANSACT_A tA.append(transact) elif transact[NUM]", "### SEGMENT ---------------------------- # формируется одна заявка в момент времени", "handlerFunc=lambda o, t:self.assertEqual(tA[0][REMAIND_TIME], 2)) Handle(sgm, handlerFunc=lambda o, t:self.assertEqual(tA[0][SCHEDULED_TIME], 9)) Handle(sgm,", "{'start': 7, 'state': 'actived'}, {'start': 9, 'state': 'deleted'}]) elif t[LABEL]", "m[OPTIONS].setAllFalse() MAX_TIME = 20 # CONSTS TRANSACT_A = \"A\" TRANSACT_B", "Segment(m) # m[OPTIONS].setAllFalse() MAX_TIME = 20 # CONSTS TRANSACT_A =", "handlerFunc=printAllTransact) Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) Advance(sgm,", "F_1 в течение 3 единиц времени. # Формируется транзакт B", "формируется одна заявка в момент времени 1 Generate(sgm, med_value=1, modificatorFunc=None,", "import Leave from pyss.storage import Storage from pyss.advance import Advance", "self.assertEqual(t[TERMINATED_TIME], 9) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 1, 'state': 'actived'}, {'start': 2,", "второй транзакт Preempt(sgm, facilityName=F_1, label=\"to_preempt\") # test # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME],", "pyss.release import Release from pyss.transfer import Transfer from pyss.test import", "1. Идёт на обработку устройством F_1 в течение 3 единиц", "pyss.transfer import Transfer from pyss.test import Test from pyss.pyss_const import", "----------------------------------\") ### MODEL ---------------------------------- m = PyssModel() sgm = Segment(m)", "# test Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Advance(sgm, meanTime=5,", "Segment from pyss.generate import Generate from pyss.terminate import Terminate from", "== TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2) self.assertEqual(t[TERMINATED_TIME], 7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2,", "{'start': 9, 'state': 'deleted'}]) elif t[LABEL] == TRANSACT_B: self.assertEqual(t[TIME_CREATED], 2)", "max_amount=2) # вспомогательные операции Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) # test", "pyss.handle import Handle from pyss.enter import Enter from pyss.leave import", "list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\" % (k, t[k])", "tearDown(self): pass # @unittest.skip(\"testing skipping test_preempt_return_001\") def test_preempt_return_001(self): \"\"\"Тест Preempt", "handlerFunc=printAllTransact) # все транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0) # ЗАПУСК ----------------------", "выходит из модели в момент времени 9. 
# Обработка транзакта", "момент времени 9. # Обработка транзакта А была прервана с", "времени 7. Транзакт А выходит из модели в момент времени", "import Terminate from pyss import logger from pyss.table import Table", "модели в момент времени 9. Обработка транзакта А была прервана", "* class TestPreemptReturn(unittest.TestCase): def setUp(self): pass def tearDown(self): pass #", "3 единиц времени. # Формируется транзакт B в момент времени", "по 7. print str([\"%s:%s\" % (k, t[k]) for k in", "pyss.leave import Leave from pyss.storage import Storage from pyss.advance import", "# list_all_transact = [] tA = [] tB = []", "transact[LABEL] = TRANSACT_A tA.append(transact) elif transact[NUM] == 2: transact[LABEL] =", "MODEL ---------------------------------- m = PyssModel() sgm = Segment(m) # m[OPTIONS].setAllFalse()", "1: transact[LABEL] = TRANSACT_A tA.append(transact) elif transact[NUM] == 2: transact[LABEL]", "в момент времени 7. Транзакт А выходит из модели в", "времени 9. Обработка транзакта А была прервана с 2 по", "F_1 = \"F_1\" # ОКУ Facility(m, facilityName=F_1) # def funcTransactTo_list_all_transact(owner,", "False return True def printAllTransact(owner, transact): print \"Time=%s\" % str(m.getCurTime())", "Прерывает работу устройства на 5 единиц времени. Транзакт B выходит", "9. # Обработка транзакта А была прервана с 2 по", "from pyss.pyss_model import PyssModel from pyss.segment import Segment from pyss.generate", "self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\" % (k, t[k]) for", "TRANSACT_A tA.append(transact) elif transact[NUM] == 2: transact[LABEL] = TRANSACT_B tB.append(transact)", "# Advance(sgm, meanTime=5, modificatorFunc=None) GReturn(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda", "Seize from pyss.release import Release from pyss.transfer import Transfer from", "Handle(sgm, handlerFunc=lambda o, t:self.assertIn(F_1, t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) Advance(sgm, meanTime=5,", "7) self.assertListEqual(t[LIFE_TIME_LIST], [ {'start': 2, 'state': 'actived'}, {'start': 7, 'state':", "# только первый транзакт Seize(sgm, facilityName=F_1) # test Handle(sgm, handlerFunc=lambda", "# функция проверки условия def checkTest(o): t=m.getCurrentTransact() if t[LABEL] ==", "list_all_transact.append(transact) def setTransactLabel(owner, transact): if transact[NUM] == 1: transact[LABEL] =", "из модели в момент времени 7. # Транзакт А выходит", "from pyss.terminate import Terminate from pyss import logger from pyss.table", "print \"tA=%s\" % str(tA[0]) print \"tB=%s\" % str(tB[0]) ### SEGMENT", "# test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm, funcTransfer=lambda", "t:self.assertNotIn(F_1, t[FACILITY])) # Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel(\"to_term\")) #--- #", "выходит из модели в момент времени 7. # Транзакт А", "Handle(sgm, handlerFunc=funcTransactTo_list_all_transact) Handle(sgm, handlerFunc=setTransactLabel) # test Handle(sgm, handlerFunc=lambda o, t:self.assertNotIn(F_1,", "момент времени 1. 
# Идёт на обработку устройством F_1 в", "t[FACILITY])) # Handle(sgm, handlerFunc=printAllTransact) # все транзакты Terminate(sgm, label=\"to_term\", deltaTerminate=0)", "pyss.seize import Seize from pyss.release import Release from pyss.transfer import", "Handle(sgm, handlerFunc=lambda o, t:not self.assertNotIn(F_1, t[FACILITY])) # Terminate(sgm, deltaTerminate=0) #", "in list_all_transact: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[TERMINATED_TIME], 6) print str([\"%s:%s\" % (k,", "== 1: transact[LABEL] = TRANSACT_A tA.append(transact) elif transact[NUM] == 2:", "if t[LABEL] == TRANSACT_A: self.assertEqual(t[TIME_CREATED], 1) self.assertEqual(t[REMAIND_TIME], 2) self.assertEqual(t[TERMINATED_TIME], 9)", "test_preempt_return_002 ----------------------------------\") ### MODEL ---------------------------------- m = PyssModel() sgm =" ]
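# --- Added note (not part of the original test) ------------------------------
# Timeline arithmetic behind the assertions above, spelled out. The names in
# this sketch are illustrative, not pyss identifiers; it is left commented so
# it does not execute alongside unittest.main().
#
#   remaining = 3 - (2 - 1)           # A seized F_1 at t=1 needing 3 units;
#                                     # B preempts at t=2, so REMAIND_TIME == 2
#   t_return = 2 + 5                  # B holds F_1 for 5 units, returns at t=7
#   t_done = t_return + remaining     # A resumes and terminates at 7 + 2 == 9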
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

import ray
from ray.rllib.ddpg2.models import DDPGModel
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.optimizers import PolicyEvaluator
from ray.rllib.utils.filter import NoFilter
from ray.rllib.utils.process_rollout import process_rollout
from ray.rllib.utils.sampler import SyncSampler


class DDPGEvaluator(PolicyEvaluator):

    def __init__(self, registry, env_creator, config):
        self.env = ModelCatalog.get_preprocessor_as_wrapper(
            registry, env_creator(config["env_config"]))

        # contains model, target_model
        self.model = DDPGModel(registry, self.env, config)

        self.sampler = SyncSampler(
            self.env, self.model.model, NoFilter(),
            config["num_local_steps"], horizon=config["horizon"])

    def sample(self):
        """Returns a batch of samples."""
        rollout = self.sampler.get_data()
        rollout.data["weights"] = np.ones_like(rollout.data["rewards"])

        # Since each sample is one step, no discounting needs to be applied;
        # this does not involve config["gamma"].
        samples = process_rollout(
            rollout, NoFilter(), gamma=1.0, use_gae=False)

        return samples

    def update_target(self):
        """Updates target critic and target actor."""
        self.model.update_target()

    def compute_gradients(self, samples):
        """Returns critic, actor gradients."""
        return self.model.compute_gradients(samples)

    def apply_gradients(self, grads):
        """Applies gradients to evaluator weights."""
        self.model.apply_gradients(grads)

    def compute_apply(self, samples):
        grads, _ = self.compute_gradients(samples)
        self.apply_gradients(grads)

    def get_weights(self):
        """Returns model weights."""
        return self.model.get_weights()

    def set_weights(self, weights):
        """Sets model weights."""
        self.model.set_weights(weights)

    def get_completed_rollout_metrics(self):
        """Returns metrics on previously completed rollouts.

        Calling this clears the queue of completed rollout metrics.
        """
        return self.sampler.get_metrics()


RemoteDDPGEvaluator = ray.remote(DDPGEvaluator)
\"\"\" return self.sampler.get_metrics() RemoteDDPGEvaluator", "config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self): \"\"\"Returns a batch of samples.\"\"\" rollout", "np import ray from ray.rllib.ddpg2.models import DDPGModel from ray.rllib.models.catalog import", "self.sampler.get_data() rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"]) # since each sample is one", "NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self): \"\"\"Returns a batch of samples.\"\"\"", "process_rollout from ray.rllib.utils.sampler import SyncSampler class DDPGEvaluator(PolicyEvaluator): def __init__(self, registry,", "# this does not involve config[\"gamma\"] samples = process_rollout( rollout,", "self.env, config) self.sampler = SyncSampler( self.env, self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"])", "np.ones_like(rollout.data[\"rewards\"]) # since each sample is one step, no discounting", "gradients to evaluator weights.\"\"\" self.model.apply_gradients(grads) def compute_apply(self, samples): grads, _", "target critic and target actor.\"\"\" self.model.update_target() def compute_gradients(self, samples): \"\"\"Returns", "from ray.rllib.optimizers import PolicyEvaluator from ray.rllib.utils.filter import NoFilter from ray.rllib.utils.process_rollout", "clears the queue of completed rollout metrics. \"\"\" return self.sampler.get_metrics()", "get_weights(self): \"\"\"Returns model weights.\"\"\" return self.model.get_weights() def set_weights(self, weights): \"\"\"Sets", "self.apply_gradients(grads) def get_weights(self): \"\"\"Returns model weights.\"\"\" return self.model.get_weights() def set_weights(self,", "from ray.rllib.models.catalog import ModelCatalog from ray.rllib.optimizers import PolicyEvaluator from ray.rllib.utils.filter", "= self.sampler.get_data() rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"]) # since each sample is", "discounting needs to be applied; # this does not involve", "def get_weights(self): \"\"\"Returns model weights.\"\"\" return self.model.get_weights() def set_weights(self, weights):", "self.model = DDPGModel(registry, self.env, config) self.sampler = SyncSampler( self.env, self.model.model,", "each sample is one step, no discounting needs to be", "as np import ray from ray.rllib.ddpg2.models import DDPGModel from ray.rllib.models.catalog", "return self.model.get_weights() def set_weights(self, weights): \"\"\"Sets model weights.\"\"\" self.model.set_weights(weights) def", "gamma=1.0, use_gae=False) return samples def update_target(self): \"\"\"Updates target critic and", "numpy as np import ray from ray.rllib.ddpg2.models import DDPGModel from", "self.model.apply_gradients(grads) def compute_apply(self, samples): grads, _ = self.compute_gradients(samples) self.apply_gradients(grads) def", "self.model.get_weights() def set_weights(self, weights): \"\"\"Sets model weights.\"\"\" self.model.set_weights(weights) def get_completed_rollout_metrics(self):", "self.env = ModelCatalog.get_preprocessor_as_wrapper( registry, env_creator(config[\"env_config\"])) # contains model, target_model self.model", "def __init__(self, registry, env_creator, config): self.env = ModelCatalog.get_preprocessor_as_wrapper( registry, env_creator(config[\"env_config\"]))", "\"\"\"Returns metrics on previously completed rollouts. 
Calling this clears the", "DDPGEvaluator(PolicyEvaluator): def __init__(self, registry, env_creator, config): self.env = ModelCatalog.get_preprocessor_as_wrapper( registry,", "applied; # this does not involve config[\"gamma\"] samples = process_rollout(", "target actor.\"\"\" self.model.update_target() def compute_gradients(self, samples): \"\"\"Returns critic, actor gradients.\"\"\"", "model weights.\"\"\" self.model.set_weights(weights) def get_completed_rollout_metrics(self): \"\"\"Returns metrics on previously completed", "on previously completed rollouts. Calling this clears the queue of", "a batch of samples.\"\"\" rollout = self.sampler.get_data() rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"])", "involve config[\"gamma\"] samples = process_rollout( rollout, NoFilter(), gamma=1.0, use_gae=False) return", "env_creator(config[\"env_config\"])) # contains model, target_model self.model = DDPGModel(registry, self.env, config)", "evaluator weights.\"\"\" self.model.apply_gradients(grads) def compute_apply(self, samples): grads, _ = self.compute_gradients(samples)", "rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"]) # since each sample is one step,", "ray from ray.rllib.ddpg2.models import DDPGModel from ray.rllib.models.catalog import ModelCatalog from", "apply_gradients(self, grads): \"\"\"Applies gradients to evaluator weights.\"\"\" self.model.apply_gradients(grads) def compute_apply(self,", "Calling this clears the queue of completed rollout metrics. \"\"\"", "__init__(self, registry, env_creator, config): self.env = ModelCatalog.get_preprocessor_as_wrapper( registry, env_creator(config[\"env_config\"])) #", "import ray from ray.rllib.ddpg2.models import DDPGModel from ray.rllib.models.catalog import ModelCatalog", "env_creator, config): self.env = ModelCatalog.get_preprocessor_as_wrapper( registry, env_creator(config[\"env_config\"])) # contains model,", "to evaluator weights.\"\"\" self.model.apply_gradients(grads) def compute_apply(self, samples): grads, _ =", "completed rollouts. Calling this clears the queue of completed rollout", "rollouts. 
Calling this clears the queue of completed rollout metrics.", "\"\"\"Sets model weights.\"\"\" self.model.set_weights(weights) def get_completed_rollout_metrics(self): \"\"\"Returns metrics on previously", "needs to be applied; # this does not involve config[\"gamma\"]", "def update_target(self): \"\"\"Updates target critic and target actor.\"\"\" self.model.update_target() def", "weights.\"\"\" return self.model.get_weights() def set_weights(self, weights): \"\"\"Sets model weights.\"\"\" self.model.set_weights(weights)", "__future__ import print_function import numpy as np import ray from", "from __future__ import division from __future__ import print_function import numpy", "rollout = self.sampler.get_data() rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"]) # since each sample", "def sample(self): \"\"\"Returns a batch of samples.\"\"\" rollout = self.sampler.get_data()", "PolicyEvaluator from ray.rllib.utils.filter import NoFilter from ray.rllib.utils.process_rollout import process_rollout from", "from ray.rllib.utils.filter import NoFilter from ray.rllib.utils.process_rollout import process_rollout from ray.rllib.utils.sampler", "import division from __future__ import print_function import numpy as np", "# contains model, target_model self.model = DDPGModel(registry, self.env, config) self.sampler", "from __future__ import absolute_import from __future__ import division from __future__", "use_gae=False) return samples def update_target(self): \"\"\"Updates target critic and target", "ray.rllib.utils.sampler import SyncSampler class DDPGEvaluator(PolicyEvaluator): def __init__(self, registry, env_creator, config):", "def compute_gradients(self, samples): \"\"\"Returns critic, actor gradients.\"\"\" return self.model.compute_gradients(samples) def", "grads): \"\"\"Applies gradients to evaluator weights.\"\"\" self.model.apply_gradients(grads) def compute_apply(self, samples):", "queue of completed rollout metrics. 
\"\"\" return self.sampler.get_metrics() RemoteDDPGEvaluator =", "from ray.rllib.utils.process_rollout import process_rollout from ray.rllib.utils.sampler import SyncSampler class DDPGEvaluator(PolicyEvaluator):", "SyncSampler class DDPGEvaluator(PolicyEvaluator): def __init__(self, registry, env_creator, config): self.env =", "import SyncSampler class DDPGEvaluator(PolicyEvaluator): def __init__(self, registry, env_creator, config): self.env", "samples): grads, _ = self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self): \"\"\"Returns model", "update_target(self): \"\"\"Updates target critic and target actor.\"\"\" self.model.update_target() def compute_gradients(self,", "\"\"\"Returns critic, actor gradients.\"\"\" return self.model.compute_gradients(samples) def apply_gradients(self, grads): \"\"\"Applies", "samples = process_rollout( rollout, NoFilter(), gamma=1.0, use_gae=False) return samples def", "self.model.compute_gradients(samples) def apply_gradients(self, grads): \"\"\"Applies gradients to evaluator weights.\"\"\" self.model.apply_gradients(grads)", "import process_rollout from ray.rllib.utils.sampler import SyncSampler class DDPGEvaluator(PolicyEvaluator): def __init__(self,", "\"\"\"Returns a batch of samples.\"\"\" rollout = self.sampler.get_data() rollout.data[\"weights\"] =", "ray.rllib.ddpg2.models import DDPGModel from ray.rllib.models.catalog import ModelCatalog from ray.rllib.optimizers import", "sample(self): \"\"\"Returns a batch of samples.\"\"\" rollout = self.sampler.get_data() rollout.data[\"weights\"]", "is one step, no discounting needs to be applied; #", "set_weights(self, weights): \"\"\"Sets model weights.\"\"\" self.model.set_weights(weights) def get_completed_rollout_metrics(self): \"\"\"Returns metrics", "of completed rollout metrics. \"\"\" return self.sampler.get_metrics() RemoteDDPGEvaluator = ray.remote(DDPGEvaluator)", "\"\"\"Updates target critic and target actor.\"\"\" self.model.update_target() def compute_gradients(self, samples):", "and target actor.\"\"\" self.model.update_target() def compute_gradients(self, samples): \"\"\"Returns critic, actor", "NoFilter from ray.rllib.utils.process_rollout import process_rollout from ray.rllib.utils.sampler import SyncSampler class", "target_model self.model = DDPGModel(registry, self.env, config) self.sampler = SyncSampler( self.env,", "= DDPGModel(registry, self.env, config) self.sampler = SyncSampler( self.env, self.model.model, NoFilter(),", "critic and target actor.\"\"\" self.model.update_target() def compute_gradients(self, samples): \"\"\"Returns critic,", "previously completed rollouts. 
Calling this clears the queue of completed", "be applied; # this does not involve config[\"gamma\"] samples =", "absolute_import from __future__ import division from __future__ import print_function import", "__future__ import absolute_import from __future__ import division from __future__ import", "import DDPGModel from ray.rllib.models.catalog import ModelCatalog from ray.rllib.optimizers import PolicyEvaluator", "ray.rllib.optimizers import PolicyEvaluator from ray.rllib.utils.filter import NoFilter from ray.rllib.utils.process_rollout import", "DDPGModel(registry, self.env, config) self.sampler = SyncSampler( self.env, self.model.model, NoFilter(), config[\"num_local_steps\"],", "SyncSampler( self.env, self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self): \"\"\"Returns a", "process_rollout( rollout, NoFilter(), gamma=1.0, use_gae=False) return samples def update_target(self): \"\"\"Updates", "compute_apply(self, samples): grads, _ = self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self): \"\"\"Returns", "self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self): \"\"\"Returns model weights.\"\"\" return self.model.get_weights() def", "ray.rllib.models.catalog import ModelCatalog from ray.rllib.optimizers import PolicyEvaluator from ray.rllib.utils.filter import", "self.model.set_weights(weights) def get_completed_rollout_metrics(self): \"\"\"Returns metrics on previously completed rollouts. Calling", "actor gradients.\"\"\" return self.model.compute_gradients(samples) def apply_gradients(self, grads): \"\"\"Applies gradients to", "self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self): \"\"\"Returns a batch of", "from ray.rllib.utils.sampler import SyncSampler class DDPGEvaluator(PolicyEvaluator): def __init__(self, registry, env_creator,", "ray.rllib.utils.filter import NoFilter from ray.rllib.utils.process_rollout import process_rollout from ray.rllib.utils.sampler import", "metrics on previously completed rollouts. Calling this clears the queue", "print_function import numpy as np import ray from ray.rllib.ddpg2.models import", "from ray.rllib.ddpg2.models import DDPGModel from ray.rllib.models.catalog import ModelCatalog from ray.rllib.optimizers", "samples.\"\"\" rollout = self.sampler.get_data() rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"]) # since each", "model, target_model self.model = DDPGModel(registry, self.env, config) self.sampler = SyncSampler(", "grads, _ = self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self): \"\"\"Returns model weights.\"\"\"", "self.sampler = SyncSampler( self.env, self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self):", "config) self.sampler = SyncSampler( self.env, self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def", "return self.model.compute_gradients(samples) def apply_gradients(self, grads): \"\"\"Applies gradients to evaluator weights.\"\"\"", "def apply_gradients(self, grads): \"\"\"Applies gradients to evaluator weights.\"\"\" self.model.apply_gradients(grads) def", "= SyncSampler( self.env, self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self): \"\"\"Returns", "this clears the queue of completed rollout metrics. 
\"\"\" return", "import numpy as np import ray from ray.rllib.ddpg2.models import DDPGModel", "# since each sample is one step, no discounting needs", "this does not involve config[\"gamma\"] samples = process_rollout( rollout, NoFilter(),", "NoFilter(), gamma=1.0, use_gae=False) return samples def update_target(self): \"\"\"Updates target critic", "self.model.update_target() def compute_gradients(self, samples): \"\"\"Returns critic, actor gradients.\"\"\" return self.model.compute_gradients(samples)", "__future__ import division from __future__ import print_function import numpy as", "from __future__ import print_function import numpy as np import ray", "= ModelCatalog.get_preprocessor_as_wrapper( registry, env_creator(config[\"env_config\"])) # contains model, target_model self.model =", "= self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self): \"\"\"Returns model weights.\"\"\" return self.model.get_weights()", "\"\"\"Applies gradients to evaluator weights.\"\"\" self.model.apply_gradients(grads) def compute_apply(self, samples): grads,", "def set_weights(self, weights): \"\"\"Sets model weights.\"\"\" self.model.set_weights(weights) def get_completed_rollout_metrics(self): \"\"\"Returns", "batch of samples.\"\"\" rollout = self.sampler.get_data() rollout.data[\"weights\"] = np.ones_like(rollout.data[\"rewards\"]) #", "import NoFilter from ray.rllib.utils.process_rollout import process_rollout from ray.rllib.utils.sampler import SyncSampler", "DDPGModel from ray.rllib.models.catalog import ModelCatalog from ray.rllib.optimizers import PolicyEvaluator from", "does not involve config[\"gamma\"] samples = process_rollout( rollout, NoFilter(), gamma=1.0,", "not involve config[\"gamma\"] samples = process_rollout( rollout, NoFilter(), gamma=1.0, use_gae=False)", "_ = self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self): \"\"\"Returns model weights.\"\"\" return", "def compute_apply(self, samples): grads, _ = self.compute_gradients(samples) self.apply_gradients(grads) def get_weights(self):", "critic, actor gradients.\"\"\" return self.model.compute_gradients(samples) def apply_gradients(self, grads): \"\"\"Applies gradients", "self.env, self.model.model, NoFilter(), config[\"num_local_steps\"], horizon=config[\"horizon\"]) def sample(self): \"\"\"Returns a batch", "ModelCatalog from ray.rllib.optimizers import PolicyEvaluator from ray.rllib.utils.filter import NoFilter from", "no discounting needs to be applied; # this does not", "get_completed_rollout_metrics(self): \"\"\"Returns metrics on previously completed rollouts. Calling this clears", "ModelCatalog.get_preprocessor_as_wrapper( registry, env_creator(config[\"env_config\"])) # contains model, target_model self.model = DDPGModel(registry,", "sample is one step, no discounting needs to be applied;", "import absolute_import from __future__ import division from __future__ import print_function", "actor.\"\"\" self.model.update_target() def compute_gradients(self, samples): \"\"\"Returns critic, actor gradients.\"\"\" return", "def get_completed_rollout_metrics(self): \"\"\"Returns metrics on previously completed rollouts. Calling this", "weights.\"\"\" self.model.set_weights(weights) def get_completed_rollout_metrics(self): \"\"\"Returns metrics on previously completed rollouts.", "= np.ones_like(rollout.data[\"rewards\"]) # since each sample is one step, no" ]
from graphviz import Digraph
from collections import namedtuple


class NetworkGraph:
    ''' Representation of the network connections.

    This class contains the entities in the network, e.g. hosts or switches,
    and the connections between them.
    '''

    Vertex = namedtuple('Vertexes', ['hosts', 'switches'])
    _edges = []

    def _sanitize_edge_connection(self, edge):
        ''' Update the '_to' and '_from' fields of an edge.

        :param edge: One edge connection.
        :type edge: dict
        :returns: Updated edge with _to and _from keys.
        :rtype: dict
        '''
        if edge['to_guid'].startswith('S'):
            to_collection = 'switches/'
        elif edge['to_guid'].startswith('H'):
            to_collection = 'hosts/'

        if edge['from_guid'].startswith('S'):
            from_collection = 'switches/'
        elif edge['from_guid'].startswith('H'):
            from_collection = 'hosts/'

        edge.update({
            '_to': to_collection + edge['to_guid'],
            '_from': from_collection + edge['from_guid']
        })
        return edge

    def _sanitize_vertexes(self, vertex):
        ''' Update the '_key' field of a vertex to the appropriate guid.

        :param vertex: Vertex
        :type vertex: dict
        :returns: An updated dict, '_key' field set to the 'guid' value.
        :rtype: dict
        '''
        vertex.update({'_key': vertex['guid']})
        return vertex

    def __init__(self, hsts=None, switches=None, connections=None):
        self._vertexes = self.Vertex(
            hosts=[self._sanitize_vertexes(h) for h in hsts],
            switches=[self._sanitize_vertexes(s) for s in switches])
        self._edges = [self._sanitize_edge_connection(c) for c in connections]

    @property
    def vertexes(self):
        ''' Returns a concatenated list of all vertexes.

        :returns: List of vertexes, consisting of hosts and switches.
        :rtype: List[dict]
        '''
        return self._vertexes.hosts + self._vertexes.switches

    @property
    def switches(self):
        ''' Returns a list of all 'switch' vertexes.

        :returns: List of all switches.
        :rtype: List[dict]
        '''
        return self._vertexes.switches

    @property
    def hosts(self):
        ''' Returns a list of all 'host' vertexes.

        :returns: List of all hosts.
        :rtype: List[dict]
        '''
        return self._vertexes.hosts

    @property
    def edges(self):
        ''' Returns a list of all 'connection' edges.

        :returns: List of all connections.
        :rtype: List[dict]
        '''
        return self._edges

    def to_graph(self, graphargs):
        ''' Draw a dot graph of the network graph.

        :param graphargs: Arguments to graphviz.Digraph.
        :type graphargs: dict
        '''
        graph = Digraph(**graphargs)
        # Iterate the flattened hosts + switches list via the property;
        # iterating the namedtuple itself would yield the two lists instead
        # of individual vertexes.
        for v in self.vertexes:
            graph.node(v['guid'], v['description'])
        for c in self._edges:
            graph.edge(c['from_guid'], c['to_guid'])
        graph.render()
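# --- Added usage sketch (not part of the original module) ---------------------
# The field names follow what the class itself reads: vertexes carry 'guid'
# and 'description'; connections carry 'from_guid'/'to_guid', with guids
# starting with 'H' for hosts and 'S' for switches. The concrete values and
# graph arguments below are invented for illustration.
if __name__ == '__main__':
    hosts = [{'guid': 'H001', 'description': 'compute node'}]
    switches = [{'guid': 'S001', 'description': 'edge switch'}]
    connections = [{'from_guid': 'H001', 'to_guid': 'S001'}]

    ng = NetworkGraph(hsts=hosts, switches=switches, connections=connections)
    print(ng.vertexes)  # hosts and switches, each now with '_key' set
    print(ng.edges)     # edges now carry '_from'/'_to' collection paths
    ng.to_graph({'name': 'network', 'format': 'svg'})  # needs graphviz binaries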
from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt

image = io.imread("star.jpg")
image_gray = rgb2gray(image)

blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)

blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)

blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)

blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
          'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)

fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()

for idx, (blobs, color, title) in enumerate(sequence):
    ax[idx].set_title(title)
    ax[idx].imshow(image)
    for blob in blobs:
        y, x, r = blob
        c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
        ax[idx].add_patch(c)
    ax[idx].set_axis_off()

plt.tight_layout()
plt.show()
max_sigma=30, threshold=.1) blobs_dog[:, 2] = blobs_dog[:, 2] *", "sqrt from skimage import data from skimage.feature import blob_dog, blob_log,", "skimage import data from skimage.feature import blob_dog, blob_log, blob_doh from", "image_gray = rgb2gray(image) blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) #", "titles = ['Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of", "[blobs_log, blobs_dog, blobs_doh] colors = ['yellow', 'lime', 'red'] titles =", "skimage.feature import blob_dog, blob_log, blob_doh from skimage.color import rgb2gray from", "blobs_log[:, 2] * sqrt(2) blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1) blobs_dog[:,", "matplotlib.pyplot as plt image = io.imread(\"star.jpg\") image_gray = rgb2gray(image) blobs_log", "sqrt(2) blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1) blobs_dog[:, 2] = blobs_dog[:,", "ax = axes.ravel() for idx, (blobs, color, title) in enumerate(sequence):", "plt.Circle((x, y), r, color=color, linewidth=2, fill=False) ax[idx].add_patch(c) ax[idx].set_axis_off() plt.tight_layout() plt.show()", "from math import sqrt from skimage import data from skimage.feature", "= plt.Circle((x, y), r, color=color, linewidth=2, fill=False) ax[idx].add_patch(c) ax[idx].set_axis_off() plt.tight_layout()", "= ['Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of Hessian']", "blobs: y, x, r = blob c = plt.Circle((x, y),", "io import matplotlib.pyplot as plt image = io.imread(\"star.jpg\") image_gray =", "blobs_list = [blobs_log, blobs_dog, blobs_doh] colors = ['yellow', 'lime', 'red']", "max_sigma=30, threshold=.1) blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) blobs_doh", "Gaussian', 'Determinant of Hessian'] sequence = zip(blobs_list, colors, titles) fig,", "for blob in blobs: y, x, r = blob c", "ax[idx].set_title(title) ax[idx].imshow(image) for blob in blobs: y, x, r =", "threshold=.01) blobs_list = [blobs_log, blobs_dog, blobs_doh] colors = ['yellow', 'lime',", "= blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) # Compute radii in the", "blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) # Compute radii in the 3rd", "num_sigma=10, threshold=.1) # Compute radii in the 3rd column. blobs_log[:,", "Compute radii in the 3rd column. 
blobs_log[:, 2] = blobs_log[:,", "= axes.ravel() for idx, (blobs, color, title) in enumerate(sequence): ax[idx].set_title(title)", "['Laplacian of Gaussian', 'Difference of Gaussian', 'Determinant of Hessian'] sequence", "data from skimage.feature import blob_dog, blob_log, blob_doh from skimage.color import", "of Hessian'] sequence = zip(blobs_list, colors, titles) fig, axes =", "blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2) blobs_dog = blob_dog(image_gray,", "max_sigma=30, num_sigma=10, threshold=.1) # Compute radii in the 3rd column.", "blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1) blobs_dog[:, 2] = blobs_dog[:, 2]", "sharex=True, sharey=True) ax = axes.ravel() for idx, (blobs, color, title)", "plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel() for", "blobs_dog, blobs_doh] colors = ['yellow', 'lime', 'red'] titles = ['Laplacian", "3, figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel() for idx,", "= blob c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)", "rgb2gray from skimage import io import matplotlib.pyplot as plt image", "Gaussian', 'Difference of Gaussian', 'Determinant of Hessian'] sequence = zip(blobs_list,", "blobs_dog[:, 2] * sqrt(2) blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01) blobs_list", "r = blob c = plt.Circle((x, y), r, color=color, linewidth=2,", "blob_doh from skimage.color import rgb2gray from skimage import io import", "blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) # Compute radii in", "threshold=.1) # Compute radii in the 3rd column. blobs_log[:, 2]", "column. blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2) blobs_dog =", "y, x, r = blob c = plt.Circle((x, y), r,", "2] * sqrt(2) blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1) blobs_dog[:, 2]", "max_sigma=30, threshold=.01) blobs_list = [blobs_log, blobs_dog, blobs_doh] colors = ['yellow',", "blob in blobs: y, x, r = blob c =", "= zip(blobs_list, colors, titles) fig, axes = plt.subplots(1, 3, figsize=(9,", "import blob_dog, blob_log, blob_doh from skimage.color import rgb2gray from skimage", "3rd column. blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2) blobs_dog", "titles) fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)", "import matplotlib.pyplot as plt image = io.imread(\"star.jpg\") image_gray = rgb2gray(image)", "sharey=True) ax = axes.ravel() for idx, (blobs, color, title) in", "blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2) blobs_doh = blob_doh(image_gray,", "enumerate(sequence): ax[idx].set_title(title) ax[idx].imshow(image) for blob in blobs: y, x, r", "math import sqrt from skimage import data from skimage.feature import", "in the 3rd column. blobs_log[:, 2] = blobs_log[:, 2] *", "sequence = zip(blobs_list, colors, titles) fig, axes = plt.subplots(1, 3,", "the 3rd column. 
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)", "color, title) in enumerate(sequence): ax[idx].set_title(title) ax[idx].imshow(image) for blob in blobs:", "from skimage.color import rgb2gray from skimage import io import matplotlib.pyplot", "blob c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False) ax[idx].add_patch(c)", "from skimage import data from skimage.feature import blob_dog, blob_log, blob_doh", "ax[idx].imshow(image) for blob in blobs: y, x, r = blob", "blob_doh(image_gray, max_sigma=30, threshold=.01) blobs_list = [blobs_log, blobs_dog, blobs_doh] colors =", "blob_dog, blob_log, blob_doh from skimage.color import rgb2gray from skimage import", "= ['yellow', 'lime', 'red'] titles = ['Laplacian of Gaussian', 'Difference", "# Compute radii in the 3rd column. blobs_log[:, 2] =", "skimage.color import rgb2gray from skimage import io import matplotlib.pyplot as", "(blobs, color, title) in enumerate(sequence): ax[idx].set_title(title) ax[idx].imshow(image) for blob in", "rgb2gray(image) blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1) # Compute radii", "2] = blobs_dog[:, 2] * sqrt(2) blobs_doh = blob_doh(image_gray, max_sigma=30,", "colors = ['yellow', 'lime', 'red'] titles = ['Laplacian of Gaussian',", "io.imread(\"star.jpg\") image_gray = rgb2gray(image) blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)", "in enumerate(sequence): ax[idx].set_title(title) ax[idx].imshow(image) for blob in blobs: y, x,", "in blobs: y, x, r = blob c = plt.Circle((x,", "as plt image = io.imread(\"star.jpg\") image_gray = rgb2gray(image) blobs_log =", "skimage import io import matplotlib.pyplot as plt image = io.imread(\"star.jpg\")", "* sqrt(2) blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01) blobs_list = [blobs_log,", "'Difference of Gaussian', 'Determinant of Hessian'] sequence = zip(blobs_list, colors,", "= plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel()", "radii in the 3rd column. blobs_log[:, 2] = blobs_log[:, 2]", "figsize=(9, 3), sharex=True, sharey=True) ax = axes.ravel() for idx, (blobs,", "Hessian'] sequence = zip(blobs_list, colors, titles) fig, axes = plt.subplots(1,", "blob_log, blob_doh from skimage.color import rgb2gray from skimage import io" ]
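Joined in order, the shingles in the row above reconstruct a scikit-image blob-detection script almost verbatim (it follows the pattern of the scikit-image gallery's blob example). A runnable version is below; the only liberty taken is dropping the unused `skimage.data` import that appears in the fragments, and `"star.jpg"` is whatever input image the original author used.

```python
from math import sqrt
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt

image = io.imread("star.jpg")
image_gray = rgb2gray(image)

blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Compute radii in the 3rd column.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)

blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)

blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)

blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
          'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)

fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
    ax[idx].set_title(title)
    ax[idx].imshow(image)
    for blob in blobs:
        y, x, r = blob
        # Draw an unfilled circle of the detected radius around each blob.
        c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
        ax[idx].add_patch(c)
    ax[idx].set_axis_off()
plt.tight_layout()
plt.show()
```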
[ "value): if value in listofnames: value_index = listofnames.index(value) return(listofnames, value_index)", "value in listofnames: value_index = listofnames.index(value) return(listofnames, value_index) else: return(-1)", "if value in listofnames: value_index = listofnames.index(value) return(listofnames, value_index) else:", "indexof(listofnames, value): if value in listofnames: value_index = listofnames.index(value) return(listofnames,", "def indexof(listofnames, value): if value in listofnames: value_index = listofnames.index(value)" ]
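The row above is a complete tiny helper. Reassembled it reads as follows; note that it returns a `(list, index)` tuple on a hit but a bare `-1` on a miss, so any caller has to type-check the result. That mixed return is in the original fragments, not an editing artifact.

```python
def indexof(listofnames, value):
    # Returns (listofnames, value_index) when value is present, -1 otherwise.
    if value in listofnames:
        value_index = listofnames.index(value)
        return (listofnames, value_index)
    else:
        return -1
```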
[ "1 def move(self): new_x = self.xcor() + SPEED*self.x_bounce new_y =", "self.move_speed = 0.1 self.y_bounce = 1 self.x_bounce = 1 def", "= 10 class Ball(Turtle): def __init__(self): super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\")", "def move(self): new_x = self.xcor() + SPEED*self.x_bounce new_y = self.ycor()", "new_x = self.xcor() + SPEED*self.x_bounce new_y = self.ycor() + SPEED*self.y_bounce", "+ SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self): self.goto(0, 0) self.move_speed =", "turtle import Turtle SPEED = 10 class Ball(Turtle): def __init__(self):", "self.goto(new_x, new_y) def reset(self): self.goto(0, 0) self.move_speed = 0.1 self.x_bounce", "new_y) def reset(self): self.goto(0, 0) self.move_speed = 0.1 self.x_bounce *=", "= 0.1 self.y_bounce = 1 self.x_bounce = 1 def move(self):", "def reset(self): self.goto(0, 0) self.move_speed = 0.1 self.x_bounce *= -1", "super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed = 0.1 self.y_bounce = 1", "self.color(\"white\") self.shape(\"circle\") self.move_speed = 0.1 self.y_bounce = 1 self.x_bounce =", "1 self.x_bounce = 1 def move(self): new_x = self.xcor() +", "Turtle SPEED = 10 class Ball(Turtle): def __init__(self): super().__init__() self.penup()", "def __init__(self): super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed = 0.1 self.y_bounce", "class Ball(Turtle): def __init__(self): super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed =", "self.shape(\"circle\") self.move_speed = 0.1 self.y_bounce = 1 self.x_bounce = 1", "import Turtle SPEED = 10 class Ball(Turtle): def __init__(self): super().__init__()", "self.y_bounce = 1 self.x_bounce = 1 def move(self): new_x =", "0.1 self.y_bounce = 1 self.x_bounce = 1 def move(self): new_x", "10 class Ball(Turtle): def __init__(self): super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed", "Ball(Turtle): def __init__(self): super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed = 0.1", "= 1 def move(self): new_x = self.xcor() + SPEED*self.x_bounce new_y", "= 1 self.x_bounce = 1 def move(self): new_x = self.xcor()", "self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self): self.goto(0, 0) self.move_speed", "from turtle import Turtle SPEED = 10 class Ball(Turtle): def", "self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed = 0.1 self.y_bounce = 1 self.x_bounce", "self.xcor() + SPEED*self.x_bounce new_y = self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y)", "SPEED*self.x_bounce new_y = self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self):", "self.x_bounce = 1 def move(self): new_x = self.xcor() + SPEED*self.x_bounce", "new_y = self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self): self.goto(0,", "move(self): new_x = self.xcor() + SPEED*self.x_bounce new_y = self.ycor() +", "= self.xcor() + SPEED*self.x_bounce new_y = self.ycor() + SPEED*self.y_bounce self.goto(new_x,", "SPEED = 10 class Ball(Turtle): def __init__(self): super().__init__() self.penup() self.color(\"white\")", "+ SPEED*self.x_bounce new_y = self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y) def", "= self.ycor() + SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self): self.goto(0, 0)", "__init__(self): super().__init__() self.penup() self.color(\"white\") self.shape(\"circle\") self.move_speed = 0.1 self.y_bounce =", "SPEED*self.y_bounce self.goto(new_x, new_y) def reset(self): self.goto(0, 0) self.move_speed = 0.1" ]
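Reassembled, the row above yields a small Pong-style ball sprite for the standard `turtle` module. The class is fully recoverable from the shingles; note that `move_speed` is set and reset but never consumed inside the class itself (presumably the surrounding game loop reads it), and `reset` flips only `x_bounce` so the ball re-serves toward the other player.

```python
from turtle import Turtle

SPEED = 10

class Ball(Turtle):
    def __init__(self):
        super().__init__()
        self.penup()
        self.color("white")
        self.shape("circle")
        self.move_speed = 0.1
        self.y_bounce = 1
        self.x_bounce = 1

    def move(self):
        # Step by SPEED in the current direction; bounce flags are +/-1.
        new_x = self.xcor() + SPEED * self.x_bounce
        new_y = self.ycor() + SPEED * self.y_bounce
        self.goto(new_x, new_y)

    def reset(self):
        # Recenter, restore speed, and send the ball the other way.
        self.goto(0, 0)
        self.move_speed = 0.1
        self.x_bounce *= -1
```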
[ "self.term class TreeInnerNode(TreeNode): def __init__(self, pred, left, right): self.pred =", "pred, left, right): self.pred = pred self.left = left self.right", "getExpr(self): return self.term class TreeInnerNode(TreeNode): def __init__(self, pred, left, right):", "class TreeNode: pass class TreeLeaf(TreeNode): def __init__(self, term): self.term =", "= pred self.left = left self.right = right def getExpr(self):", "term): self.term = term def getExpr(self): return self.term class TreeInnerNode(TreeNode):", "class TreeInnerNode(TreeNode): def __init__(self, pred, left, right): self.pred = pred", "self.right = right def getExpr(self): return Expr('ite', self.pred, self.left.getExpr(), self.right.getExpr())", "self.left = left self.right = right def getExpr(self): return Expr('ite',", "class TreeLeaf(TreeNode): def __init__(self, term): self.term = term def getExpr(self):", "TreeNode: pass class TreeLeaf(TreeNode): def __init__(self, term): self.term = term", "__init__(self, term): self.term = term def getExpr(self): return self.term class", "import Expr class TreeNode: pass class TreeLeaf(TreeNode): def __init__(self, term):", "left self.right = right def getExpr(self): return Expr('ite', self.pred, self.left.getExpr(),", "right): self.pred = pred self.left = left self.right = right", "TreeLeaf(TreeNode): def __init__(self, term): self.term = term def getExpr(self): return", "__init__(self, pred, left, right): self.pred = pred self.left = left", "TreeInnerNode(TreeNode): def __init__(self, pred, left, right): self.pred = pred self.left", "def __init__(self, term): self.term = term def getExpr(self): return self.term", "pred self.left = left self.right = right def getExpr(self): return", "jry2.semantics import Expr class TreeNode: pass class TreeLeaf(TreeNode): def __init__(self,", "self.term = term def getExpr(self): return self.term class TreeInnerNode(TreeNode): def", "left, right): self.pred = pred self.left = left self.right =", "term def getExpr(self): return self.term class TreeInnerNode(TreeNode): def __init__(self, pred,", "= left self.right = right def getExpr(self): return Expr('ite', self.pred,", "self.pred = pred self.left = left self.right = right def", "Expr class TreeNode: pass class TreeLeaf(TreeNode): def __init__(self, term): self.term", "def getExpr(self): return self.term class TreeInnerNode(TreeNode): def __init__(self, pred, left,", "= term def getExpr(self): return self.term class TreeInnerNode(TreeNode): def __init__(self,", "def __init__(self, pred, left, right): self.pred = pred self.left =", "return self.term class TreeInnerNode(TreeNode): def __init__(self, pred, left, right): self.pred", "pass class TreeLeaf(TreeNode): def __init__(self, term): self.term = term def", "from jry2.semantics import Expr class TreeNode: pass class TreeLeaf(TreeNode): def" ]
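The row above reconstructs cleanly into a tiny decision-tree-to-expression adapter: leaves hold a term, inner nodes combine their subtrees into an `'ite'` (if-then-else) expression. `Expr` comes from the external `jry2.semantics` package in the fragments; a namedtuple stand-in is substituted here so the sketch stays self-contained.

```python
from collections import namedtuple

# Stand-in for jry2.semantics.Expr, which is external in the original fragments.
Expr = namedtuple('Expr', ['op', 'pred', 'left', 'right'])

class TreeNode:
    pass

class TreeLeaf(TreeNode):
    def __init__(self, term):
        self.term = term

    def getExpr(self):
        return self.term

class TreeInnerNode(TreeNode):
    def __init__(self, pred, left, right):
        self.pred = pred
        self.left = left
        self.right = right

    def getExpr(self):
        # Emit an if-then-else expression over the two subtrees.
        return Expr('ite', self.pred, self.left.getExpr(), self.right.getExpr())
```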
[ "base ring as well. zeta = self._zeta if zeta_order is", "\\frac{B_{k,\\varepsilon}}{k!} t^k. ALGORITHM: The ``'recurrence'`` algorithm computes generalized Bernoulli numbers", "fixed:: sage: G = DirichletGroup(1) sage: chi = G.one() sage:", "sum associated to `\\chi` is .. MATH:: g_a(\\chi) = \\sum_{r", "the modulus. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: H.<c> =", "subgroup, where `n` is the exponent of `(\\ZZ/N\\ZZ)^*`. Many operations,", "False, True, False, True, False] sage: G = DirichletGroup(13, CC)", "values on each unit generator return self.element_class(self, x) elif not", "divisible by various powers of 2 present some problems as", "1,), 2*zeta6 - 3) ((-zeta6,), (-zeta6,), 3*zeta6 - 1) ((-zeta6,),", "a complex field\") zeta = CC.zeta(G.modulus()) ** a g =", "integral: base_ring = base_ring.ring_of_integers() if not is_Ring(base_ring): raise TypeError(\"base_ring (=", "Groups category = Groups().Commutative() if base_ring.is_integral_domain() or base_ring.is_finite(): # The", "standard generators of `(\\ZZ/N\\ZZ)^*`, where `N` is the modulus. EXAMPLES::", "== G True We compute a Dirichlet group over a", "0, 0; 0, 1, 0; 0, 0, 1]], [0, 1,", "s in all_jacobi_sums: ....: print(s) ((1,), (1,), 5) ((1,), (zeta6,),", "characters in this group (default: the cyclotomic field `\\QQ(\\zeta_n)`, where", "f = K.complex_embeddings()[0] sage: D = DirichletGroup(5, K) sage: D.change_ring(f)", "self.conrey_number()) webbrowser.open(url) def galois_orbit(self, sort=True): r\"\"\" Return the orbit of", "zip(self.values_on_gens(), other.values_on_gens())) return G.element_class(G, x, check=False) def __copy__(self): \"\"\" Return", "of order 10 and degree 4' sage: G.rename('Dir(11)') sage: G", "1 sage: e.order() 12 This illustrates a canonical coercion:: sage:", "- zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 - 2*zeta156^14", "Finite Field of size 5 ] \"\"\" R = self.base_ring()", "Traceback (most recent call last): ... ValueError: base ring (=", "a look at a non-prime modulus:: sage: N = 9", "`(\\ZZ/N\\ZZ)^*`. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20,QQ) sage: b.maximize_base_ring() Dirichlet character", "13 of conductor 13 mapping 2 |--> -1 sage: G([K.0])", "= R(zeta) if isinstance(R, Map): R = R.codomain() return DirichletGroup(self.modulus(),", "17 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3) 22 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3", "Map) or R.has_coerce_map_from(self.base_ring())): raise TypeError(\"no coercion map from %s to", "G.gen(0).base_ring() Cyclotomic Field of order 10 and degree 4 sage:", "n = z.multiplicative_order() m = lcm(g,n) if n == m:", "not None: if not base_ring.is_integral_domain(): raise ValueError(\"base ring (= %s)", "self.__bernoulli[k] N = self.modulus() K = self.base_ring() if N ==", "conductor %s' % (self.modulus(), self.conductor()) r = len(self.values_on_gens()) if r", "-1] sage: e = DirichletGroup(20).gen(1) sage: e.values() [0, 1, 0,", "z = CC.one() for c in self.values()[1:]: z *= zeta", "restriction of this character to a Dirichlet character modulo the", "-2*zeta6 + 1 sage: G = DirichletGroup(20) sage: e =", "the group, are only implemented if `V` is cyclic and", "modulo 5 with values in Cyclotomic Field of order 4", "1) .. NOTE:: The constructor of :class:`DirichletCharacter` sets the cache", "this group, which can be renamed. 
EXAMPLES:: sage: G =", "17 |--> -1 sage: chi._pari_conversion() ([[24, [0]], [8, [2, 2,", "-1, Dirichlet character modulo 20 of conductor 20 mapping 11", "is not ``None``. OUTPUT: The group of Dirichlet characters modulo", "7 in Ring of integers modulo 15 sage: G.order() 4", "elements whose parents have different zeta orders works:: sage: a", "- :func:`sage.arith.misc.gauss_sum` for general finite fields - :func:`sage.rings.padics.misc.gauss_sum` for a", "Return a Dirichlet character that equals this one, but over", "\"lcalc\":: sage: a = a.primitive_character() sage: L = a.lfunction(algorithm='lcalc'); L", "p. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = DirichletGroup(13).0", "K = G.base_ring() sage: G(1) Dirichlet character modulo 13 of", "``self``, or a ring homomorphism with the base ring of", "= [p**m for m in range(0,r)] return Auts def galois_orbits(self,", "or in v if v is not None. INPUT: -", "Dirichlet character modulo 20 of conductor 5 mapping 11 |-->", "Vecsmall([3, 3, 1])], [[8, 8, 3], [[1, matrix(0,2)], [1, matrix(0,2)],", "of the given modulus, with values in the given base", "import Sequence from sage.structure.factory import UniqueFactory from sage.structure.richcmp import richcmp", "mapping 11 |--> 1, 17 |--> -1 sage: b.maximize_base_ring().base_ring() Cyclotomic", "G Group of Dirichlet characters modulo 17 with values in", "= CyclotomicField(4).ring_of_integers() sage: G = DirichletGroup(60, r4) sage: G.gens() (Dirichlet", "if and only if `\\varepsilon(-1) = 1`. EXAMPLES:: sage: G", "(-zeta6,), -2*zeta6 + 3) ((-1,), (-zeta6 + 1,), 2*zeta6 -", "d = rings.Integer(d) if d == 0: raise ValueError(\"d must", "modulo 37 of conductor 37 mapping 2 |--> zeta36^4 sage:", "sage: e = G.0 The real component of the numerical", "sage: f*e Dirichlet character modulo 13 of conductor 1 mapping", "2 :: sage: r4 = CyclotomicField(4).ring_of_integers() sage: G = DirichletGroup(60,", "particular, it is finitely generated; the added # FinitelyGenerated() here", "subfield of the base ring. EXAMPLES:: sage: G = DirichletGroup(30);", "+ zeta30^3 - zeta30 - 1 When a root of", "if is_ComplexField(P.base_ring()): zeta = P.zeta() zeta_argument = zeta.argument() v =", "associated to this Dirichlet character. This includes Gauss sums, classical", "trivial_character(3) == DirichletGroup(3, QQ).0^2 True \"\"\" return (isinstance(X, DirichletGroup_class) and", "Dirichlet groups of prime power modulus corresponding to primes dividing", "of conductor 4 mapping 3 |--> -1 sage: e.restrict(50) Traceback", "self._zeta if zeta is None: R = self.base_ring() e =", "more in many cases, # especially since we end up", "g def jacobi_sum(self, char, check=True): r\"\"\" Return the Jacobi sum", "modulo 60 of conductor 3 mapping 31 |--> 1, 41", "isinstance(R, Map): R = R.codomain() return DirichletGroup(self.modulus(), R, zeta=zeta, zeta_order=zeta_order)", "prime subfield of the base ring. EXAMPLES:: sage: G =", "sage: e.gauss_sum_numerical(a=2, prec=100) 4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I sage: G = DirichletGroup(13)", "at least compared to # the other algorithm below. 
That", "[0, 1, -1, 0, 1, -1, 0, 0, -1, 0,", "zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15 +", "= self.values() S = lambda n: sum(v[r] * r**n for", "' |--> ' + str(self.values_on_gens()[i]) return s def _latex_(self): r\"\"\"", "group of Dirichlet characters - ``x`` -- one of the", "0: if e.modulus() % 4 == 0: val *= e.values_on_gens()[0]", "= DirichletGroup(20) sage: e = G.1 sage: e.kloosterman_sum_numerical(53,3,11) 3.80422606518061 -", "EXAMPLES:: sage: DirichletGroup(20).decomposition() [ Group of Dirichlet characters modulo 4", "4 and degree 2 ] sage: DirichletGroup(20,GF(5)).decomposition() [ Group of", "degree 2 Note that the root of unity can change::", "DirichletGroup(20) sage: c = a*b sage: d = c.decomposition(); d", "is a cyclotomic field or QQ.\") phi = K.complex_embedding(prec) CC", "TypeError(\"no coercion map from %s to %s is defined\" %", "is checked that the orders of the elements in `x`", "13 of conductor 13 mapping 2 |--> zeta12, Dirichlet character", "is None and zeta_order is None): raise ValueError(\"zeta and zeta_order", "elements:: sage: G = DirichletGroup(5, Zmod(15)); G Group of Dirichlet", "17 |--> zeta4 sage: G.gen(2) Traceback (most recent call last):", "of Dirichlet characters modulo `N` with values in a subgroup", "sage: chi.multiplicative_order() 4 Other operations only work if ``zeta`` is", "CACHING: Computed Kloosterman sums are *not* cached with this character.", "function. Likewise # computing all binomial coefficients can be done", "quicker). CACHING: Computed Kloosterman sums are *not* cached with this", "= lmfdb_url.format(self.modulus(), self.conrey_number()) webbrowser.open(url) def galois_orbit(self, sort=True): r\"\"\" Return the", "modulo 15 sage: G.gens() (Dirichlet character modulo 5 of conductor", "x[0]*x[1]^2; e Dirichlet character modulo 35 of conductor 35 mapping", "# NOTE -- over QQ! sage: b.modulus() 2401 AUTHORS: -", "and degree 2, 60, None, None) An example to illustrate", "= R_values[value] # iterate: # increase the exponent vector by", "``zeta`` and ``zeta_order`` are omitted, then `V` is taken to", "in range(len(D))] def extend(self, M): \"\"\" Returns the extension of", "of conductor 4 mapping 11 |--> -1, 17 |--> 1'", "0, 0, 0, -1, 0, 1, 0, -1, 0, 1,", "is not None: if not base_ring.is_integral_domain(): raise ValueError(\"base ring (=", "sage.structure.factory import UniqueFactory from sage.structure.richcmp import richcmp from sage.arith.all import", "H True sage: G3 = DirichletGroup(31, CyclotomicField(3)) sage: G5 =", "1, 0, -1, 0, 1, 0, 0, 0, 1, 0,", "sage.modules.free_module_element as free_module_element import sage.rings.all as rings import sage.rings.number_field.number_field as", "zeta_argument = zeta.argument() v = [int(x.argument() / zeta_argument) for x", "group of roots of unity of order dividing ``zeta_order`` in", "False sage: G = DirichletGroup.create_object(None, k); G Group of Dirichlet", "DirichletGroup(60).zeta() zeta4 sage: DirichletGroup(60,QQ).zeta() -1 sage: DirichletGroup(60, GF(25,'a')).zeta() 2 \"\"\"", "the respective generators of `(\\ZZ/N\\ZZ)^*`. 
OUTPUT: The Dirichlet character defined", "[0]], [2, [2], [3]], [[2]~, Vecsmall([2])], [[4], [[1, matrix(0,2)]], Mat(1),", "G.<a,b> = DirichletGroup(20) sage: a.is_primitive() False sage: b.is_primitive() False sage:", "conductor 24 mapping 7 |--> 1, 13 |--> -1, 17", "of order 4 and degree 2 If the order of", "print as lists giving the values of the character on", "Field with 53 bits of precision \"\"\" if zeta is", "zeta6] sage: (DirichletGroup(36).0).decomposition() [Dirichlet character modulo 4 of conductor 4", "``R``. INPUT: - ``R`` -- either a ring admitting a", "utf-8 -*- r\"\"\" Dirichlet characters A :class:`DirichletCharacter` is the extension", "be stored when pickling an instance of :class:`DirichletCharacter`. \"\"\" pows", "chi4 = DirichletGroup(4).gen() sage: chi4.conrey_number() 3 sage: chi = DirichletGroup(24)([1,-1,-1]);", "to Dirichlet character modulo 20 of conductor 4 mapping 11", "== f True sage: e == f False sage: k", "A table of values of the character is made the", "EXAMPLES:: sage: G.<a> = DirichletGroup(11) sage: b = copy(a) sage:", "sage: E = DirichletGroup(4).gen() sage: E.lmfdb_page() # optional -- webbrowser", "`\\zeta` is a primitive `m^{th}` root of unity. EXAMPLES:: sage:", "Field in a with defining polynomial x^2 - 3 with", "7, 9, 11, 13, 15] sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms() [1,", "example we create a Dirichlet group with values in a", "character on each integer between 0 and the modulus. EXAMPLES::", "in the group of order 4 generated by 7 in", "- zeta156^2 - 1 sage: factor(norm(e.gauss_sum())) 13^24 TESTS: The field", "False \"\"\" return isinstance(x, DirichletGroup_class) class DirichletGroup_class(WithEqualityById, Parent): \"\"\" Group", "either with or without specifying a root of unity:: sage:", "has a # distinguished set of generators. category = category.Finite().FinitelyGenerated()", "problems as the multiplicative group modulo `2^k` is trivial for", "sage: [t(x) for x in [0..20]] [0, 1, 1, 1,", "of values of the character is made the first time", "def gauss_sum_numerical(self, prec=53, a=1): r\"\"\" Return a Gauss sum associated", ": t CC = K elif is_AlgebraicField(K): from sage.rings.complex_mpfr import", "not the largest order root of unity in the field::", "DirichletCharacter): raise TypeError(\"cannot convert %s to an element of %s\"", "terms of the GNU General Public License as published by", "from sage.modular.dirichlet import DirichletCharacter sage: M = FreeModule(Zmod(16), 3) sage:", "`N` is the modulus of self. 
EXAMPLES:: sage: G =", "= DirichletGroup(1) sage: chi = G.one() sage: chi.gauss_sum() 1 ..", "1 mapping 2 |--> 1 sage: G([-1]) Dirichlet character modulo", "Field of order 6 and degree 2 Note that the", "Field to Integer Ring is defined Base-extended Dirichlet groups do", "+ 5) :: sage: DirichletGroup(60, integral=True) Group of Dirichlet characters", "operations only work if ``zeta`` is specified:: sage: G.gens() Traceback", "latex(DirichletGroup(1)[0]) \\hbox{Dirichlet character modulo } 1 \\hbox{ of conductor }", "-zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0,", "G = DirichletGroup(3) sage: e = G([-1]) sage: e.gauss_sum(1) 2*zeta6", "sage.misc.cachefunc import cached_method from sage.misc.fast_methods import WithEqualityById from sage.structure.element import", "__values_on_gens in the past # we need to set the", "(= %s) must be an integral domain if only zeta_order", "G = DirichletGroup(10) sage: TestSuite(G[1]).run() It is checked that the", "includes Gauss sums, classical Kloosterman sums, Salié sums, etc. The", "characters, miscellaneous fixes - <NAME> (2014-03-06): use UniqueFactory to cache", "17 |--> zeta4 Multiplying elements whose parents have different zeta", "an error will be raised. EXAMPLES:: sage: DirichletGroup(20).galois_orbits() [ [Dirichlet", "= prod(G.gens(), G(1)) sage: e Dirichlet character modulo 60 of", "(2, 16, 2), respectively \"\"\" MultiplicativeGroupElement.__init__(self, parent) if check: orders", "richcmp from sage.arith.all import (binomial, bernoulli, kronecker, factor, gcd, lcm,", "Field sage: trivial_character(7, Integers(3))(1).parent() Ring of integers modulo 3 \"\"\"", "- zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30", "0, 0, zeta4, 0, -1] sage: e = DirichletGroup(21).gen(0) ;", "together, where the power is between 0 and the order", "``True`` if and only if this character is primitive, i.e.,", "-1, 22369178537 |--> -1, 14266017175 |--> 1 AUTHORS: - <NAME>", "must divide M(=50) \"\"\" M = int(M) if self.modulus()%M !=", "added examples of everything; fix a *lot* of tiny bugs", "_element_constructor_(self, x): \"\"\" Construct a Dirichlet character from `x`. EXAMPLES::", "to a power, so the return value is a list", "sage: e(2) 0 sage: e(7) -zeta4 sage: Integers(60).unit_gens() (31, 41,", "short string representation of self, often used in string representations", "work if ``zeta`` is specified:: sage: G.gens() Traceback (most recent", "a = a.primitive_character() sage: L = a.lfunction(algorithm='lcalc'); L L-function with", "order of ``zeta`` is very large. - If ``zeta`` is", "mapping ' for i in range(r): if i != 0:", "into the other parent fails in both cases:: sage: d[0]*d[1]", "are not units mod 22. 
while x.modulus().gcd(v) != 1: v", "val_on_gen = self.element() exponents = [0] * len(orders) n =", "(2014-03-06): use UniqueFactory to cache DirichletGroups \"\"\" # **************************************************************************** #", "if len(F) > 1: return prod([d.conductor() for d in self.decomposition()])", "precision \"\"\" if zeta is None and self._zeta is not", "|--> -1, Dirichlet character modulo 5 of conductor 5 mapping", "\\sum_{r \\in \\ZZ/m\\ZZ} \\chi(r)\\,\\zeta^{ar}, where `m` is the modulus of", "sage: G.integers_mod() Ring of integers modulo 20 \"\"\" return self._integers", "2*zeta20^4 + 4 TESTS:: sage: G = DirichletGroup(20, UniversalCyclotomicField()) sage:", "sage: G = DirichletGroup(11) sage: repr(G) # indirect doctest 'Group", "- 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13 sage: eps = DirichletGroup(9).0", "integers modulo 20 \"\"\" return self._integers __iter__ = multiplicative_iterator def", "of this character. EXAMPLES:: sage: e = DirichletGroup(100).0 sage: e.modulus()", "zeta156^20 - zeta156^19 + zeta156^18 - zeta156^16 - zeta156^15 -", "self.element_class(self, x) elif not isinstance(x, DirichletCharacter): raise TypeError(\"cannot convert %s", "; ls[0:10] [0, 1, -zeta10^3, -zeta10, -zeta10, 1, zeta10^3 -", "G.modulus() if is_ComplexField(K): return self.gauss_sum_numerical(a=a) elif is_AlgebraicField(K): L = K", "(most recent call last): ... NotImplementedError: order of element not", "when the base ring is a cyclotomic field or QQ.\")", "state_dict[element_key] del state_dict[element_key] super(DirichletCharacter, self).__setstate__(state) if values_on_gens is not None:", "the modulus of `\\chi` and `\\zeta` is a primitive `m`", "sage: b.kernel() [1, 11] \"\"\" one = self.base_ring().one() return [x", "character modulo 13 of conductor 1 mapping 2 |--> 1],", "case where `R` is a map (:trac:`18072`):: sage: K.<i> =", "e.order()) % 2: val *= -1 return val def __call__(self,", "v.sort() return v def gauss_sum(self, a=1): r\"\"\" Return a Gauss", "order 4 and degree 2 sage: d[1].parent() Group of Dirichlet", "Create a Dirichlet group. Not to be called directly (use", "G[1][2] # one should use the following, but this does", "sage: g.zeta_order() 2 :: sage: r4 = CyclotomicField(4).ring_of_integers() sage: G", "and 1 \"\"\" n = int(n) g = self.gens() if", "sage: f = ~e sage: f*e Dirichlet character modulo 13", "p, p^2, ..., p^(r-1), # where p^r = 1 (mod", "False sage: (a*b).is_primitive() True \"\"\" return (self.conductor() == self.modulus()) @cached_method", "H False If ``base_ring`` was not be a part of", "CyclotomicField(4) sage: G = DirichletGroup(192) sage: G([i, -1, -1]) Traceback", "G = DirichletGroup(20, QQ); G Group of Dirichlet characters modulo", "of ``zeta``; this is useful if the base ring is", "for the conversion of the character to Pari. 
OUTPUT: pair", "-1, 41 |--> 1, 37 |--> 1, Dirichlet character modulo", "of the modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring()) return H(self) def", "= DirichletGroup(192) sage: G([i, -1, -1]) Traceback (most recent call", "sage: e(7) -zeta4 sage: Integers(60).unit_gens() (31, 41, 37) sage: e(31)", "a domain (so `V` is cyclic), and `V` must have", "https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey EXAMPLES:: sage: chi4 = DirichletGroup(4).gen() sage: chi4.conrey_number() 3 sage:", "2 are printed correctly (see :trac:`17338`):: sage: latex(DirichletGroup(1)[0]) \\hbox{Dirichlet character", "is only implemented if the base ring has characteristic 0", "power, so the return value is a list of integers.", "import is_RationalField from sage.rings.complex_mpfr import is_ComplexField from sage.rings.qqbar import is_AlgebraicField", "polynomial x^4 + 1 sage: DirichletGroup(5, K, zeta_order=2) Group of", "exponent of `(\\ZZ/N\\ZZ)^*`. Many operations, such as finding a set", "sage: G.change_ring(CyclotomicField(6)) Group of Dirichlet characters modulo 7 with values", "11 sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)]) 11 And sums", "rings.RationalField()) return G([kronecker(D,u) for u in G.unit_gens()]) def kronecker_character_upside_down(d): \"\"\"", "2) Let's check that trivial sums are being calculated correctly::", "modulo 13 of conductor 13 mapping 2 |--> zeta12^2 sage:", "(2006-08-06) \"\"\" d = rings.Integer(d) if d == 0: raise", "AUTHORS: - <NAME> (2005-09-02): Fixed bug in comparison of Dirichlet", "a.lfunction(algorithm='lcalc'); L L-function with complex Dirichlet coefficients sage: L.value(4) #", "free_module_element.is_FreeModuleElement(x): x = parent._module(x) if any(u * v for u,", "zeta156^4 - zeta156^2 - 1 sage: factor(norm(e.gauss_sum())) 13^24 TESTS: The", "z in self.values_on_gens()] if self.modulus() % 8 == 0: #", "[0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)] ber", "%s) must be an integral domain if only zeta_order is", "chosen root of unity in the base ring. EXAMPLES:: sage:", "modulo 13 of conductor 13 mapping 2 |--> zeta12^3 -", "order 4 generated by 7 in Ring of integers modulo", "other, op): \"\"\" Compare ``self`` to ``other``. .. NOTE:: Since", "mutable *only* because immutable vectors are not implemented yet. EXAMPLES::", "(most recent call last): ... TypeError: unsupported operand parent(s) for", "z in enumerate(self._zeta_powers)} def change_ring(self, R, zeta=None, zeta_order=None): \"\"\" Return", "DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits() Traceback (most recent call last): ... TypeError:", "factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial, valuation) def trivial_character(N, base_ring=rings.RationalField()):", "zeta=2); G Group of Dirichlet characters modulo 5 with values", "QQbar) sage: e = G.gens()[0] sage: e.kloosterman_sum(5,11) Traceback (most recent", "illustrate that ``base_ring`` is a part of the key:: sage:", "DirichletGroup(60) sage: e = prod(G.gens(), G(1)) sage: e Dirichlet character", "and the order of the generator minus 1, inclusive. EXAMPLES::", "zeta12^3, 31 |--> zeta12^2 - 1 sage: e.order() 12 This", "zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 -", "... IndexError: n(=-1) must be between 0 and 1 \"\"\"", "x in range(self.modulus()) if self(x) == one] def maximize_base_ring(self): r\"\"\"", "R is not a domain, an error will be raised.", ":class:`DirichletCharacter` is the extension of a homomorphism .. 
MATH:: (\\ZZ/N\\ZZ)^*", "b*e**(-1)) return g def kloosterman_sum_numerical(self, prec=53, a=1, b=0): r\"\"\" Return", "sage: a.is_trivial() False sage: (a^2).is_trivial() True \"\"\" if self.element.is_in_cache(): return", "IndexError: n(=-1) must be between 0 and 1 \"\"\" n", "and degree 2, Group of Dirichlet characters modulo 5 with", "power modulus, where the prime powers exactly divide the modulus", "the DirichletGroup factory. p = R.characteristic() if p == 0", "self. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: repr(a) # indirect", "10000000000000000000000000000000000000121 Note that the root of unity has small order,", "0) sage: b.element() (0, 1) .. NOTE:: The constructor of", "key, the keys would compare equal and the caching would", "this character. EXAMPLES:: sage: e = DirichletGroup(100).1 sage: e.order() #", "Cyclotomic Field of order 4 and degree 2 ] sage:", "this is the trivial character, i.e., has order 1. EXAMPLES::", "whether to replace the default cyclotomic field by its rings", "2*zeta6 + 1) ((zeta6 - 1,), (-zeta6,), -1) ((zeta6 -", "the exponent of this group. EXAMPLES:: sage: DirichletGroup(20).exponent() 4 sage:", "Dirichlet character. EXAMPLES:: sage: G.<a> = DirichletGroup(11) sage: b =", "e = self(1) for i in range(self.ngens()): g = self.gen(i)", "2 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) 17 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) * GF(29)(3) 22 sage:", "is generally slower). INPUT: - ``prec`` -- integer (default: 53),", "0, 0, 1, 36, 0, 1, 36] sage: e =", "2401 AUTHORS: - <NAME> (2006-08-06) \"\"\" d = rings.Integer(d) if", "`\\sqrt{p}`. CACHING: Computed Gauss sums are *not* cached with this", "values in Cyclotomic Field of order 10 and degree 4'", "r\"\"\" Returns the Dirichlet groups of prime power modulus corresponding", "sage.modular.dirichlet import DirichletCharacter sage: M = FreeModule(Zmod(16), 3) sage: DirichletCharacter(G,", "return self(-1) == R(-1) @cached_method def is_primitive(self): \"\"\" Return ``True``", "to this Dirichlet character as an approximate complex number with", "or # (at your option) any later version. # https://www.gnu.org/licenses/", "lambda t : t CC = K elif is_AlgebraicField(K): from", "for x in range(self.modulus()) if self(x) == one] def maximize_base_ring(self):", "webbrowser \"\"\" import webbrowser lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}' url = lmfdb_url.format(self.modulus(),", "coefficients can be done much # more efficiently. v =", "2. vals = [1] + vals return [D[i](vals[i]) for i", "+= \"the group of order %s generated by %s in", "0, 0, 0, 1, 0, 1] sage: e = DirichletGroup(20).gen(0)", "conductor 1 mapping 11 |--> 1, 17 |--> 1] ]", "+= str(self.base_ring()) return s @cached_method def decomposition(self): r\"\"\" Returns the", "sage: f = e.primitive_character(); f Dirichlet character modulo 4 of", "recent call last): ... ValueError: values (= (zeta16^4, -1, -1))", "sage: sqrt(3.0) 1.73205080756888 sage: e.gauss_sum_numerical(a=2) -...e-15 - 1.7320508075...*I sage: e.gauss_sum_numerical(a=2,", "False If the base ring is not an integral domain,", "polynomial x^4 + 1 An example where we give ``zeta``,", "if self._zeta is not None: s += \"the group of", "correct value of the Jacobi sum J(Y, Z). 
sage: Y.jacobi_sum(Z);", "in this group (default: the cyclotomic field `\\QQ(\\zeta_n)`, where `n`", "TypeError: raise NotImplementedError('Kloosterman sums not implemented ' 'over this ring')", "e.bar() Dirichlet character modulo 5 of conductor 5 mapping 2", "0, -1, 0, 1, -1, 0, 1, 0, 0, 1,", "sage: Integers(60).unit_gens() (31, 41, 37) sage: e(31) -1 sage: e(41)", "(11, 17) sage: G.zeta() zeta4 sage: G.zeta_order() 4 In this", "2 z = self.base_ring().zeta() n = z.multiplicative_order() m = lcm(g,n)", "x = tuple(~z for z in self.values_on_gens()) return G.element_class(G, x,", "\\ in factor(self.modulus())], cr=True, universe = cat.Objects()) def exponent(self): \"\"\"", "or self.is_trivial(): return rings.Integer(1) F = factor(self.modulus()) if len(F) >", "e = G.0 sage: e.is_even() False sage: e(-1) -1 sage:", "must be None if base_ring not specified\") e = rings.IntegerModRing(modulus).unit_group_exponent()", "= rings.CyclotomicField(m.lcm(zo)) zeta = L.gen(0) try: self(1) * zeta**(a+b) except", "5) ((1,), (zeta6,), -1) ((1,), (zeta6 - 1,), -1) ((1,),", "elements must divide the orders of the respective generators of", "Returns the minimal generators for the units of `(\\ZZ/N\\ZZ)^*`, where", "recent call last): ... NotImplementedError: order of element not known", "Compute the automorphisms of self. These are always given by", "zeta30^5 + zeta30^4 + zeta30^3 - zeta30 - 1 When", "= rings.Integer(d) if d == 0: raise ValueError(\"d must be", "EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.element() (2, 0) sage:", "\"\"\" return richcmp(self.values_on_gens(), other.values_on_gens(), op) def __hash__(self): \"\"\" Return the", "17 |--> zeta4) \"\"\" g = [] ord = self.zeta_order()", "must have additive orders dividing (2, 16, 2), respectively \"\"\"", "self.order() <= 2: K = rings.QQ elif (isinstance(R, number_field.NumberField_generic) and", "DirichletGroupFactory(UniqueFactory): r\"\"\" Construct a group of Dirichlet characters modulo `N`.", "- (optional: default True) whether to sort the list of", "seen_so_far = set([]) for x in v: z = x.element()", "# k-th powering for # k = 1, p, p^2,", "((n*t).exp(prec)) for n in range(1,N+1)] ber = sum([self(a)*h[a][k] for a", "2 does not divide `\\phi(p^n)/\\mbox{\\rm ord}(\\varepsilon)`. EXAMPLES:: sage: chi =", "True Note that ``is_even`` need not be the negation of", "DirichletGroup(N) sage: g = D(1) sage: g.jacobi_sum(g) 11 sage: sum([g(x)*g(1-x)", "= DirichletGroup(100151, CyclotomicField(10)).0 sage: ls = chi.values() ; ls[0:10] [0,", "+ 1,), zeta6 + 2) Let's check that trivial sums", "the complex conjugate of this Dirichlet character. EXAMPLES:: sage: e", "explicit cache __element in the past # we need to", "import UniqueFactory from sage.structure.richcmp import richcmp from sage.arith.all import (binomial,", "Dirichlet character modulo 60 of conductor 5 mapping 31 |-->", "\\in (\\ZZ/m\\ZZ)^\\times} \\chi(r)\\,\\zeta^{ar+br^{-1}}, where `m` is the modulus of `\\chi`", "is given, compute the other. if zeta is not None:", "each element of v into self. The Galois group is", "Copyright (C) 2004-2006 <NAME> <<EMAIL>> # Copyright (C) 2014 <NAME>", "must be \"pari\" or \"lcalc\"') @cached_method def conductor(self): \"\"\" Computes", "__setstate__(self, state): \"\"\" Used for unpickling old instances. 
TESTS:: sage:", "is_DirichletCharacter(trivial_character(3)) True sage: is_DirichletCharacter([1]) False \"\"\" return isinstance(x, DirichletCharacter) class", "sage: DirichletGroup(20).galois_orbits() [ [Dirichlet character modulo 20 of conductor 20", "a subgroup `V` of the multiplicative group `R^*` of ``base_ring``.", "sage: ls = chi.values() ; ls[0:10] [0, 1, -zeta10^3, -zeta10,", "is_DirichletCharacter(x): r\"\"\" Return True if x is of type DirichletCharacter.", "+ 1) ((zeta6,), (-1,), -2*zeta6 - 1) ((zeta6,), (-zeta6,), zeta6", "IndexError: # Done! return result_list value += val_on_gen[i] n *=", "case the group of roots of unity is not necessarily", "3 sage: f.jacobi_sum(e) 3*zeta12^2 + 2*zeta12 - 3 sage: p", "# (which may still be None). zeta_order = self._zeta_order #", "+ 3) ((zeta6,), (zeta6 - 1,), 2*zeta6 + 1) ((zeta6,),", "|--> -1, Dirichlet character modulo 9 of conductor 1 mapping", "value -1 above is the correct value of the Jacobi", "= zeta.argument() v = M([int(round(x.argument() / zeta_argument)) for x in", "Rational Field \"\"\" return self.parent().base_ring() def bar(self): \"\"\" Return the", "sum([g(x)*g(1-x) for x in IntegerModRing(N)]) 11 And sums where exactly", "their values were the same, but not checking that they", "``None``. OUTPUT: The group of Dirichlet characters modulo `N` with", "of ``base_ring``. This is the group of homomorphisms `(\\ZZ/N\\ZZ)^* \\to", "[R.one()] elif mod == 2: return [R.zero(), R.one()] result_list =", "divide modulus\") a = [] for u in self.unit_gens(): v", "first time you call this (unless `m` equals -1) EXAMPLES::", "self.element() one = self.base_ring().one() return all(x == one for x", "group of order 8 generated by a in Number Field", "|--> -1, 17 |--> 1 sage: DirichletGroup(60).random_element() Dirichlet character modulo", "True Conductors that are divisible by various powers of 2", "1 sage: G.gen(1) Dirichlet character modulo 20 of conductor 5", "and degree 2 TESTS: We test the case where `R`", "examples - <NAME> (2006-05-21): added examples of everything; fix a", "Group of Dirichlet characters modulo 7 with values in Cyclotomic", "Cyclotomic Field of order 4 and degree 2 We create", "cache=True, **opts): r\"\"\" Returns the generalized Bernoulli number `B_{k,eps}`. INPUT:", "2, 0, 1, 2, 0, 1, 0, 0, 1, 2,", "with values in Ring of integers modulo 15 sage: DirichletGroup(17,", "parent(s) for *: 'Group of Dirichlet characters modulo 4 with", "is a prime `p` and the character is nontrivial, then", "* mod gens = G.unit_gens() orders = G.integers_mod().unit_group().gens_orders() R_values =", "(zeta6,), -1) ((1,), (zeta6 - 1,), -1) ((1,), (-1,), -1)", "self.element()]) @cached_method(do_pickle=True) def element(self): r\"\"\" Return the underlying `\\ZZ/n\\ZZ`-module vector", "= DirichletGroup(60, r4) sage: G.gens() (Dirichlet character modulo 60 of", "it stays the # same; otherwise it will be recomputed", "G = DirichletGroup(11) sage: repr(G) # indirect doctest 'Group of", "the object was not found in the cache. 
TESTS:: sage:", "g @cached_method def is_even(self): r\"\"\" Return ``True`` if and only", "= DirichletGroup(17, GF(9,'a')).0 sage: g.jacobi_sum(g**2) 2*a TESTS: This shows that", "raise ValueError(\"M(=%s) must divide the modulus(=%s)\"%(M,self.modulus())) if M%self.conductor() != 0:", "conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 + zeta30^4", "w = [a] zeta = self.zeta() zeta_order = self.zeta_order() if", "# computing all binomial coefficients can be done much #", "= state_dict[element_key] del state_dict[element_key] super(DirichletCharacter, self).__setstate__(state) if values_on_gens is not", "self.element_class(self, a) def _coerce_map_from_(self, X): \"\"\" Decide whether there is", "G.<a,b> = DirichletGroup(20) sage: a^2 Dirichlet character modulo 20 of", "True) whether to sort the list of orbits and the", "the Bernoulli function. Likewise # computing all binomial coefficients can", "DirichletGroup(16)([-1, 1]) sage: f = e.restrict(8) sage: e == e", "and returns the conductor of this character. EXAMPLES:: sage: G.<a,b>", "t CC = K elif is_AlgebraicField(K): from sage.rings.complex_mpfr import ComplexField", "is a primitive `m^{th}` root of unity. EXAMPLES:: sage: G", "self.base_ring()) return H(self) @cached_method def values(self): \"\"\" Return a list", "sage: e = DirichletGroup(5).0 sage: e Dirichlet character modulo 5", "- If both ``zeta`` and ``zeta_order`` are omitted, then `V`", "if free_module_element.is_FreeModuleElement(x): self.element.set_cache(x) else: self.values_on_gens.set_cache(x) @cached_method def __eval_at_minus_one(self): r\"\"\" Efficiently", "No root of unity specified; use the same zeta_order #", "1 \"\"\" G = self.parent() R = G.base_ring() mod =", "DirichletGroup(abs(D), rings.RationalField()) return G([kronecker(D,u) for u in G.unit_gens()]) def kronecker_character_upside_down(d):", "smaller order than expected (:trac:`6018`):: sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))", "characters. It was checking that their values were the same,", "the base extension of ``self`` to ``R``. INPUT: - ``R``", "sage: d[0]*d[1] == c Traceback (most recent call last): ...", "zeta.multiplicative_order() zeta = zeta**(n // m) for c in m.coprime_integers(m):", "chi.gauss_sum() zeta52^22 + zeta52^21 + zeta52^19 - zeta52^16 + zeta52^15", "other.values_on_gens())) return G.element_class(G, x, check=False) def __copy__(self): \"\"\" Return a", "conductor 4 mapping 11 |--> -1, 17 |--> 1 sage:", "``zeta_order`` is an element of ``ZZ``. 
TESTS:: sage: G =", "= self._integers.unit_group_exponent() for d in reversed(e.divisors()): try: zeta = R.zeta(d)", "a group of Dirichlet characters - ``x`` -- one of", "return s def base_ring(self): \"\"\" Returns the base ring of", "INPUT: - ``prec`` -- precision (default 53) - ``algorithm`` --", "4 and degree 2 We create the group of Dirichlet", "returns the generalized Bernoulli number `B_{k,\\varepsilon}`, as defined by the", "in self.values_on_gens()) def kernel(self): r\"\"\" Return the kernel of this", "be set for the other method to work properly, these", "z for y, z in zip(self.values_on_gens(), other.values_on_gens())) return G.element_class(G, x,", "``x`` -- one of the following: - tuple or list", "cyclotomic field, QQ, QQbar, or a complex field\") zeta =", "= CC.one() for c in self.values()[1:]: z *= zeta g", "order 4 and degree 2 sage: DirichletGroup(20).base_ring() Cyclotomic Field of", "zeta_order is not None: if not base_ring.is_integral_domain(): raise ValueError(\"base ring", "self.base_ring() return Sequence([DirichletGroup(p**r,R) for p, r \\ in factor(self.modulus())], cr=True,", "sage: E.lmfdb_page() # optional -- webbrowser \"\"\" import webbrowser lmfdb_url", "[0, 0, 0]], [1, 0, 0; 0, 1, 0; 0,", "order # of R(zeta) by the DirichletGroup factory. p =", "Compare ``self`` to ``other``. .. NOTE:: Since there is no", "== eps2.conrey_number() True \"\"\" G, v = self._pari_conversion() return pari.znconreyexp(G,", "mapping } ' for i in range(r): if i !=", "\\chi(r)\\,\\zeta^{ar}, where `m` is the modulus of `\\chi` and `\\zeta`", "import is_AlgebraicField from sage.rings.ring import is_Ring from sage.misc.functional import round", "} %s' % (self.modulus(), self.conductor()) r = len(self.values_on_gens()) if r", "1.7320508075688772935274463415*I sage: G = DirichletGroup(13) sage: H = DirichletGroup(13, CC)", "sage: e.multiplicative_order() 2 \"\"\" if self.parent().zeta.is_in_cache(): return self.element().additive_order() return lcm([z.multiplicative_order()", "== e True sage: f == f True sage: e", "must be an integral domain if only zeta_order is specified", "trivial and Z is quartic sage: sum([Y(x)*Z(1-x) for x in", "coercion map to exist:: sage: G.base_extend(ZZ) Traceback (most recent call", "self.element() else: x = tuple(z**n for z in self.values_on_gens()) return", "[3], [2], [0]], Mat(1)], [1]) sage: chi = DirichletGroup(24)([1,-1,-1]); chi", "c.extend(20) Dirichlet character modulo 20 of conductor 4 mapping 11", "method to work properly, these caches have to be stored", "zeta_order = self._zeta_order else: # No root of unity specified;", "for z in orbit: seen_so_far.add(tuple(z.element())) G = Sequence(G, cr=True) if", "the group of integers `\\ZZ/N\\ZZ` where `N` is the modulus", "if cache: try: self.__bernoulli except AttributeError: self.__bernoulli = {} if", "root of unity in the field:: sage: g.zeta_order() 2 ::", "%s \\hbox{ of conductor } %s' % (self.modulus(), self.conductor()) r", "rings.CyclotomicField(n) zeta = L.gen(0) ** (n // m) else: raise", "sage.modules.free_module as free_module import sage.modules.free_module_element as free_module_element import sage.rings.all as", "sage: chi.galois_orbit() Traceback (most recent call last): ... TypeError: Galois", "exponents = [0] * len(orders) n = G.integers_mod().one() value =", "Traceback (most recent call last): ... 
TypeError: Unable to coerce", "for a `p`-adic version \"\"\" G = self.parent() K =", "values in the group of order 4 generated by 2", "0, 1], [7, 13, 17], [2, 2, 2], [0, 0,", "to the Gauss sum if `b=0`. This method performs an", "A Dirichlet character. \"\"\" def __init__(self, parent, x, check=True): r\"\"\"", "trivial group.) EXAMPLES:: sage: DirichletGroup(20).decomposition() [ Group of Dirichlet characters", "1, 2] :: sage: chi = DirichletGroup(100151, CyclotomicField(10)).0 sage: ls", "as equal. TESTS:: sage: trivial_character(6) == trivial_character(3) # indirect doctest", "pari from sage.categories.map import Map from sage.rings.rational_field import is_RationalField from", "but not checking that they had the same level! -", "Group of Dirichlet characters modulo 4 with values in Cyclotomic", "with the same parameters yields the same object:: sage: DirichletGroup(60)", "zeta12^2 - 1 sage: e.order() 12 This illustrates a canonical", "values in a subgroup `V` of the multiplicative group `R^*`", "1, 1, 1, 0, 1, 1, 1, 1, 1, 1,", "= DirichletGroup(100).0 sage: e.multiplicative_order() 2 \"\"\" if self.parent().zeta.is_in_cache(): return self.element().additive_order()", "zeta = L.gen(0) ** (n // m) else: raise NotImplementedError(\"Gauss", "Dirichlet character on the standard generators of `(\\ZZ/N\\ZZ)^*` as returned", "a product of Dirichlet characters of prime power modulus, where", "in a with defining polynomial x^4 + 1 sage: G.list()", "5 mapping 11 |--> 1, 17 |--> -1, Dirichlet character", "[[z] for z in self.values_on_gens()] if self.modulus() % 8 ==", "x, check=False) def __copy__(self): \"\"\" Return a (shallow) copy of", "zeta4 sage: e(31*37) -zeta4 sage: parent(e(31*37)) Cyclotomic Field of order", "(most recent call last): ... NotImplementedError: Automorphisms for finite non-field", "N \\ZZ)^*` where `N` is the modulus EXAMPLES:: sage: chi4", "e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values() [0, 1, 36, 0,", "that are divisible by various powers of 2 present some", "[0]], [8, [2, 2, 2], [7, 13, 17]], [[2, 2,", "where `e` is the order of the standard root of", "f = e.restrict(8) sage: e == e True sage: f", "zeta4 to a rational \"\"\" R = self.base_ring() try: if", "mapping } 15 \\mapsto 1,\\ 5 \\mapsto \\zeta_{4} TESTS: Dirichlet", "e(-1) -1 sage: e(2) 0 sage: e(7) -zeta4 sage: Integers(60).unit_gens()", "6 and degree 2 TESTS: We test the case where", "recent call last): ... TypeError: Unable to coerce zeta12 to", "1, 17 |--> zeta4 sage: G.gen(2) Traceback (most recent call", "integers modulo 15) must be an integral domain if only", "Dirichlet group. This is the same as self.order(). EXAMPLES:: sage:", "= [[z] for z in self.values_on_gens()] if self.modulus() % 8", "-1) ((1,), (-1,), -1) ((1,), (-zeta6,), -1) ((1,), (-zeta6 +", "sage: eps2 = DirichletGroup(5,QQ)([-1]) sage: eps1.conrey_number() == eps2.conrey_number() True \"\"\"", "equals Valuation(Order(x),p)+1. cond = p**(valuation(self.order(),p) + 1) if p ==", "\\in \\ZZ/m\\ZZ} \\chi(r)\\,\\zeta^{ar}, where `m` is the modulus of `\\chi`", "with this character. 
EXAMPLES:: sage: G = DirichletGroup(3) sage: e", "(G, v) def conrey_number(self): r\"\"\" Return the Conrey number for", "(in contrast to :meth:`change_ring`) requires a coercion map to exist::", "DirichletGroup(20).random_element() Dirichlet character modulo 20 of conductor 4 mapping 11", "by zeta4 in Cyclotomic Field of order 4 and degree", "= DirichletGroup(20) sage: a^2 Dirichlet character modulo 20 of conductor", "(= {}) must have multiplicative orders dividing {}, respectively\" .format(x,", "are always given by raising to a power, so the", "False, True, False, True, False, True, False] sage: G =", "the conductor is the smallest p**r such that # Order(x)", "((-zeta6 + 1,), (-zeta6 + 1,), zeta6 + 2) Let's", "set for the other method to work properly, these caches", "the quadratic Dirichlet character (d/.) of minimal conductor. EXAMPLES:: sage:", "the first Bernoulli number of the trivial # character is", "on generators (want {})\".format(x, len(orders))) if free_module_element.is_FreeModuleElement(x): x = parent._module(x)", "G.unit_gens()]) sage: e.kloosterman_sum(7,17) -2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4", "m.coprime_integers(m): e = rings.Mod(c, m) z = zeta ** int(a*e", "sage: psi = chi.change_ring(f) sage: psi(2) -1.83697019872103e-16 - 1.00000000000000*I \"\"\"", "0, 0, 1, 0, 1, 0, 1, 0, 1, 0,", "G = DirichletGroup(13) sage: e = DirichletGroup(13).0 sage: e.base_ring() Cyclotomic", "' for i in range(r): if i != 0: s", "sum_{k=0}^{\\infty} \\frac{B_{k,\\varepsilon}}{k!} t^k. ALGORITHM: The ``'recurrence'`` algorithm computes generalized Bernoulli", "sage: d = c.decomposition(); d [Dirichlet character modulo 4 of", "NotImplementedError(\"Gauss sums only currently implemented when the base ring is", "v = self._pari_conversion() return pari.znconreyexp(G, v).sage() def lmfdb_page(self): r\"\"\" Open", "if mod == 1: return [R.one()] elif mod == 2:", "character modulo 5 of conductor 5 mapping 2 |--> zeta4", "be a domain (so `V` is cyclic), and `V` must", "-zeta4 sage: Integers(60).unit_gens() (31, 41, 37) sage: e(31) -1 sage:", "sage: e.change_ring(QQ) Traceback (most recent call last): ... TypeError: Unable", "Copyright (C) 2014 <NAME> <<EMAIL>> # # This program is", "same zeta_order # (which may still be None). zeta_order =", "\"\"\" Returns True if x is a Dirichlet group. EXAMPLES::", "generator minus 1, inclusive. EXAMPLES:: sage: DirichletGroup(37).random_element() Dirichlet character modulo", "than computing the value of -1 directly using dlog and", "# We reuse _zeta_order if we know that it stays", "zeta4, 0, -1] sage: e = DirichletGroup(21).gen(0) ; e.values() [0,", "a number field. It's the identity function in characteristic p.", "divide modulus sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1) Traceback (most", "vals = [[z] for z in self.values_on_gens()] if self.modulus() %", "@cached_method def values(self): \"\"\" Return a list of the values", "\"\"\" Returns the number of generators of self. EXAMPLES:: sage:", "- zeta10^2 + zeta10 - 1, zeta10^2] TESTS: Test that", "of Dirichlet characters modulo 5 with values in Complex Field", "e in G] [False, True, False, True, False, True, False,", "sage: G = DirichletGroup(3) sage: e = G([-1]) sage: e.kloosterman_sum(3,5)", "sage: a Dirichlet character modulo 20 of conductor 4 mapping", "Field TESTS: Check that :trac:`18479` is fixed:: sage: f =", "only zeta_order is specified sage: G = DirichletGroup(17, Integers(15), zeta=7);", "rings of integers as the base ring. 
The resulting group knows its modulus, order, exponent and distinguished root
of unity::

    sage: G = DirichletGroup(20)
    sage: G.modulus()
    20
    sage: G.order()
    8
    sage: G.ngens()
    2
    sage: G.exponent()
    4
    sage: G.zeta()
    zeta4
    sage: G.zeta_order()
    4
    sage: G.unit_gens()
    (11, 17)

``decomposition()`` returns the Dirichlet groups of prime power modulus
corresponding to the primes dividing the modulus. Moduli that are divisible by
various powers of 2 present some problems, since the trivial "factor" coming
from `(\ZZ/2\ZZ)^*` is the trivial group. ``random_element()`` builds a
character by multiplying a random power of each generator together, where the
power is between 0 and the order of the generator minus 1, inclusive, and
``galois_orbits()`` partitions the group into orbits under the absolute Galois
group of the prime subfield of the fraction field of the base ring.
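Decomposition at the group and at the character level; multiplying the local
pieces back together fails because the factors live in groups of different
moduli (recovered doctests)::

    sage: DirichletGroup(20).decomposition()
    [
    Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2,
    Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
    ]
    sage: c = DirichletGroup(20).0 * DirichletGroup(20).1
    sage: d = c.decomposition(); d
    [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
    sage: d[0]*d[1] == c
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'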
Characters are called like functions on `\ZZ`: the value at `m` depends only
on `m` modulo `N`, and is zero whenever `\gcd(m, N) > 1`::

    sage: G = DirichletGroup(60)
    sage: e = G.0 * G.1 * G.2
    sage: e
    Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4
    sage: e(-1)
    -1
    sage: e(2)
    0
    sage: e(7)
    -zeta4

``kernel()`` returns the kernel of this character. OUTPUT: Currently the
kernel is returned as the list of integers `x` with `0 \le x < N` and
`\chi(x) = 1`; the recovered implementation is simply::

    return [x for x in range(self.modulus()) if self(x) == one]
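For instance (the second computation redoes the kernel by hand; that check is
an added illustration, not a recovered doctest)::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.kernel()
    [1, 9, 13, 17]
    sage: [x for x in range(20) if gcd(x, 20) == 1 and a(x) == 1]
    [1, 9, 13, 17]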
A character can be moved between moduli and between value rings. ``extend(M)``
lifts it to a multiple `M` of its modulus, ``restrict(M)`` pushes it down to
any `M` divisible by its conductor, and ``primitive_character()`` returns the
associated primitive character, whose modulus is the conductor::

    sage: e = DirichletGroup(100).0
    sage: e.conductor()
    4
    sage: e.restrict(20)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: e.restrict(7)
    Traceback (most recent call last):
    ...
    ValueError: conductor(=4) must divide M(=7)

For the value ring, ``change_ring(R)`` accepts a ring or a ring homomorphism
with the base ring of ``self`` as its domain, while ``base_extend(R)``
additionally requires a coercion map to exist. Converting a character into a
group over a smaller ring fails when its values do not lie there::

    sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1)
    Traceback (most recent call last):
    ...
    TypeError: Unable to coerce zeta4 to a rational
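``change_ring`` with a ring homomorphism, as in the recovered :trac:`18072`
test (the intermediate output ``i`` is inferred from the final value)::

    sage: K.<i> = QuadraticField(-1)
    sage: chi = DirichletGroup(5, K)[1]
    sage: chi(2)
    i
    sage: f = K.complex_embeddings()[0]
    sage: psi = chi.change_ring(f)
    sage: psi(2)
    -1.83697019872103e-16 - 1.00000000000000*I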
``values_on_gens()`` gives the values on the unit generators, ``values()``
the list of values on ``0, 1, ..., N-1``, and ``element()`` the exponent
vector of the character with respect to the distinguished root of unity::

    sage: e = DirichletGroup(16)([-1, 1])
    sage: e.values_on_gens()
    (-1, 1)

A character `\varepsilon` is *even* if `\varepsilon(-1) = 1` and *odd* if
`\varepsilon(-1) = -1`; over inexact rings the test is numerical rather than
computing the value of `-1` exactly using discrete logarithms.
``multiplicative_order()`` (and its synonym ``order()``, since the group is
multiplicative) gives the order of the character; ``bar()`` is the complex
conjugate character, and ``~e`` the inverse::

    sage: e = DirichletGroup(100).1
    sage: e.order()
    20
    sage: e = DirichletGroup(13).0
    sage: f = ~e
    sage: f*e
    Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
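The value list of the second generator modulo 20, with a parity check; the
last two lines are an added illustration::

    sage: e = DirichletGroup(20).gen(1)
    sage: e.values()
    [0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]
    sage: e(3)
    -zeta4
    sage: e.is_odd()
    True
    sage: e.bar()(3)
    zeta4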
Internally, ``DirichletGroup.create_key`` normalizes its arguments so that
either both ``zeta`` and ``zeta_order`` are set or both are ``None``;
reassembled from the recovered fragments (indentation restored, surrounding
branches elided)::

    if zeta is not None:
        # A root of unity was explicitly given; map it into the base
        # ring and derive its order if necessary.
        zeta = base_ring(zeta)
        if zeta_order is None:
            zeta_order = zeta.multiplicative_order()
    elif zeta_order is not None:
        if not base_ring.is_integral_domain():
            raise ValueError("base ring (= %s) must be an integral domain"
                             " if only zeta_order is specified" % base_ring)
        zeta_order = rings.Integer(zeta_order)
        zeta = base_ring.zeta(zeta_order)   # reconstruction: a generator of V is computed here
The Gauss sum associated to `\chi` is

.. MATH::

    g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\, \zeta^{ar},

where `m` is the modulus of `\chi` and `\zeta` is a primitive `m`-th root of
unity. ``gauss_sum(a=1)`` performs an exact calculation and returns an element
of a suitable cyclotomic field; Gauss sums are only currently implemented when
the base ring is a cyclotomic field or `\QQ` (a ``NotImplementedError`` is
raised otherwise). ``gauss_sum_numerical(prec=53, a=1)`` instead returns an
approximate complex number with ``prec`` bits of precision.
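A recovered example, followed by a direct evaluation of the defining sum (the
last two lines are an added sanity check, not a recovered doctest)::

    sage: G = DirichletGroup(3)
    sage: e = G([-1])
    sage: e.gauss_sum(1)
    2*zeta6 - 1
    sage: norm(e.gauss_sum())
    3
    sage: K = e.base_ring(); zeta3 = K.zeta(3)
    sage: sum(e(r) * zeta3^r for r in range(3))
    2*zeta6 - 1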
``minimize_base_ring()`` returns the same Dirichlet character, but over as
small a subfield (or subring) of the base ring as possible; it is the identity
function in characteristic `p`. In the other direction, let
`\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)` be a Dirichlet character. Then
``maximize_base_ring()`` returns the equal character

.. MATH::

    \chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m),

where `m` is the least common multiple of `n` and the exponent of
`(\ZZ/N\ZZ)^*`.
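Recovered examples::

    sage: G.<e> = DirichletGroup(13)
    sage: e.base_ring()
    Cyclotomic Field of order 12 and degree 4
    sage: (e^2).minimize_base_ring().base_ring()
    Cyclotomic Field of order 6 and degree 2
    sage: (e^3).minimize_base_ring().base_ring()
    Cyclotomic Field of order 4 and degree 2
    sage: G.<a,b> = DirichletGroup(20,QQ)
    sage: b.maximize_base_ring()
    Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
    sage: b.maximize_base_ring().base_ring()
    Cyclotomic Field of order 4 and degree 2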
Several further character sums are implemented. This includes Gauss sums,
classical Kloosterman sums, Salié sums, etc. The "twisted" Kloosterman sum
associated to `\chi` is

.. MATH::

    K(a, b, \chi) = \sum_{r \in (\ZZ/m\ZZ)^*} \chi(r)\, \zeta^{ar + br^{-1}},

which reduces to the Gauss sum if `b = 0`. ``kloosterman_sum(a=1, b=0)``
performs an exact calculation and returns an element of a suitable cyclotomic
field; ``kloosterman_sum_numerical(prec=53, a=1, b=0)`` gives an inexact
answer, which is generally much faster. The exact version raises a
``NotImplementedError`` when the base ring does not supply the needed roots of
unity.
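A recovered example; here ``e`` is the trivial character modulo 20, so this is
a classical (untwisted) Kloosterman sum::

    sage: G = DirichletGroup(20)
    sage: e = G([1 for u in G.unit_gens()])
    sage: e.kloosterman_sum(7,17)
    -2*zeta20^6 + 2*zeta20^4 + 4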
MATH:: \\chi :", "2 |--> -a^2] We can also restrict the order of", "for pari (list of exponents) P = self.parent() if is_ComplexField(P.base_ring()):", "Cyclotomic Field of order 12 and degree 4 sage: e.minimize_base_ring().base_ring()", "of %s\" % (x, self)) elif not x.conductor().divides(self.modulus()): raise TypeError(\"conductor", "sage: DirichletGroup(60).zeta() zeta4 sage: DirichletGroup(60,QQ).zeta() -1 sage: DirichletGroup(60, GF(25,'a')).zeta() 2", "1 of conductor 1 sage: DirichletGroup(2)[0] Dirichlet character modulo 2", "** int(a*e + b*(e**(-1))) g += phi(self(c))*z return g @cached_method", "+ 2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21", "[0, 1, 2, 0, 1, 2, 0, 0, 2, 0,", "= self.base_ring()(1) for e in D: if e.modulus() % 2", "character modulo 60 of conductor 60 mapping 31 |--> -1,", "L-function with complex Dirichlet coefficients sage: L.value(4) # abs tol", "zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18 -", "if only zeta_order is specified\" % base_ring) zeta_order = rings.Integer(zeta_order)", "the same parameters yields the same object:: sage: DirichletGroup(60) is", "else: if not rings.ZZ(p).is_prime(): raise NotImplementedError(\"Automorphisms for finite non-field base", "-zeta6 + 3) ((zeta6,), (zeta6 - 1,), 2*zeta6 + 1)", "modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring()) return H(self) def _pari_conversion(self): r\"\"\"", "-1, 17 |--> 1 sage: a Dirichlet character modulo 20", "give ``zeta``, but not its order:: sage: G = DirichletGroup(5,", "Dirichlet groups are cached, creating two groups with the same", "sage: e = G.0 sage: e.is_even() False sage: e(-1) -1.000000...", "any(u * v for u, v in zip(x, orders)): raise", "} %s \\hbox{ of conductor } %s' % (self.modulus(), self.conductor())", "N - 1: return self.values()[m] else: return self.__eval_at_minus_one() def change_ring(self,", "= DirichletGroup(5, QQ).0 sage: f = DirichletGroup(5,CyclotomicField(4)).0 sage: e*f Dirichlet", "sage: (a*b).conductor() 20 TESTS:: sage: G.<a, b> = DirichletGroup(20) sage:", "sage: K = CyclotomicField(4) sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))", "v = self.values() S = lambda n: sum(v[r] * r**n", "in range(1,N+1)]) * factorial(k) else: raise ValueError(\"algorithm = '%s' unknown\"%algorithm)", "DirichletGroup(20) sage: repr(a) # indirect doctest 'Dirichlet character modulo 20", "((-1,), (-zeta6 + 1,), 2*zeta6 - 3) ((-zeta6,), (-zeta6,), 3*zeta6", "g = DirichletGroup(17, GF(9,'a')).0 sage: g.jacobi_sum(g**2) 2*a TESTS: This shows", "# Map zeta to the new parent if zeta is", "if isinstance(x, list): # list of values on each unit", "modulo 1 and 2 are printed correctly (see :trac:`17338`):: sage:", "[0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1,", "9.4.1. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = G.0", "`\\chi` and `\\zeta` is a primitive `m` th root of", "character modulo 5 of conductor 5 mapping 2 |--> 4", "G.0 sage: e Dirichlet character modulo 13 of conductor 13", "EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.is_primitive() False sage: b.is_primitive()", "Cyclotomic Field of order 4 and degree 2' and 'Group", "sage: e.order() # same as multiplicative_order, since group is multiplicative", "zeta self._zeta_order = zeta_order self._modulus = modulus self._integers = rings.IntegerModRing(modulus)", "lfunction(self, prec=53, algorithm='pari'): \"\"\" Return the L-function of ``self``. 
``lfunction(prec=53, algorithm='pari')`` returns the `L`-function of ``self``.
The default ``algorithm='pari'`` returns a PARI-based `L`-function object;
``algorithm='lcalc'`` instead returns a wrapper around the ``lcalc`` program
(via ``Lfunction_from_character`` from ``sage.libs.lcalc.lcalc_Lfunction``).
Any other value raises a ``ValueError``.
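A usage sketch assembled from the recovered doctests (the ``lcalc`` wrapper is
applied to the associated primitive character, as in the original test)::

    sage: G.<a,b> = DirichletGroup(20)
    sage: L = a.lfunction()              # PARI-based L-function object
    sage: a = a.primitive_character()
    sage: L = a.lfunction(algorithm='lcalc'); L
    L-function with complex Dirichlet coefficients
    sage: L.value(4)  # abs tol 1e-14
    0.988944551741105 - 5.16608739123418e-18*I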
``galois_orbit(sort=True)`` returns the orbit of this character under the
action of the absolute Galois group of the prime subfield of the fraction
field of its base ring; ``galois_orbits()`` on the group returns all orbits
and raises ``TypeError: Galois orbits only defined if base ring is an integral
domain`` otherwise. The automorphisms are always given by raising to a power:
in characteristic 0, the exponents `e` with `\gcd(e, n) = 1` (where `n` is the
order of ``zeta``); in characteristic `p`, the `k`-th powering maps for
`k = 1, p, p^2, \dots, p^{r-1}`. The characteristic must be 0 or a prime
(``NotImplementedError: Automorphisms for finite non-field base rings not
implemented``). One subtlety recovered from the orbit code: unit generators
must be lifted to integers coprime to the larger modulus::

    for u in self.unit_gens():
        v = u.lift()
        # have to do this since e.g. unit gens mod 11 are not units mod 22
        while x.modulus().gcd(v) != 1:
            v += self.modulus()
        a.append(R(x(v)))
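A recovered example: the orbit of a conductor-5 character modulo 30 has the
two Galois-conjugate values `\pm\zeta_4`::

    sage: G = DirichletGroup(30); e = G.1
    sage: e.galois_orbit()
    [Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4, Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 7 |--> zeta4]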
Let `\varepsilon` be a (not necessarily primitive) character of modulus `N`.
Its generalized Bernoulli numbers `B_{k,\varepsilon}` are defined by the
following identity of power series:

.. MATH::

    \sum_{a=1}^{N} \frac{\varepsilon(a)\, t e^{at}}{e^{Nt} - 1}
    = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!}\, t^k.

``bernoulli(k, algorithm='recurrence', cache=True, **opts)`` computes
`B_{k,\varepsilon}`; extra keyword arguments are not used directly, but passed
to the :func:`bernoulli` function if it is called. The ``'recurrence'``
algorithm computes generalized Bernoulli numbers via classical Bernoulli
numbers, which is usually a factor of 10 or more faster; ``'definition'``
works directly from the power series above. Note that by this definition the
first Bernoulli number of the trivial character is `1/2`, in contrast to the
classical value `B_1 = -1/2`; some authors use an alternative definition
giving `B_{1,\varepsilon} = -1/2`.
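The `k = 1` convention, recovered from the :trac:`17586` test and contrasted
with the classical Bernoulli number::

    sage: DirichletGroup(1)[0].bernoulli(1)
    1/2
    sage: bernoulli(1)
    -1/2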
A Dirichlet character is constructed from its parent and its values on the
standard generators of `(\ZZ/N\ZZ)^*` (as returned by ``unit_gens()``), given
either as a tuple or list of ring elements, or as the corresponding exponent
vector over `\ZZ/e\ZZ`, where `e` is the order of the standard root of unity.
With ``check=True`` (the default) the values are validated: their
multiplicative orders must divide the orders of the respective generators,
otherwise a ``ValueError`` of the form ``values (= ...) must have
multiplicative orders dividing ..., respectively`` is raised. Passing
``check=False`` skips the validation (slightly faster).
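For example (the exact error text below follows the message template recovered
from the source and may differ slightly between versions)::

    sage: G = DirichletGroup(13)
    sage: G(1)
    Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
    sage: G([-1])
    Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
    sage: G([2])
    Traceback (most recent call last):
    ...
    ValueError: values (= (2,)) must have multiplicative orders dividing (12,), respectively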
Standard characters have convenience constructors: ``trivial_character(N,
base_ring=QQ)`` (with alias ``TrivialCharacter``) returns the trivial
character of the given modulus with values in the given base ring, implemented
simply as ``DirichletGroup(N, base_ring)(1)``, and ``kronecker_character(d)``
returns the quadratic Dirichlet character `(d/.)` of minimal conductor, built
from the fundamental discriminant of `d` (``d`` must be nonzero). Each
character also has a Conrey number, the label that identifies it as a
character of modulus `q` in the sense of
https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey;
``conrey_number()`` computes it through PARI (``pari.znconreyexp`` applied to
the data prepared by ``_pari_conversion()``), and ``lmfdb_page()`` opens
``https://www.lmfdb.org/Character/Dirichlet/{modulus}/{conrey_number}`` in a
web browser::

    sage: chi = DirichletGroup(60)([1,-1,I])
    sage: chi.conrey_number()
    17
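Examples; the ``kronecker_character`` output is a hand computation (for `d=5`
the Kronecker symbol sends 2, a non-square modulo 5, to `-1`), not a recovered
doctest::

    sage: t = trivial_character(7)
    sage: [t(x) for x in range(8)]
    [0, 1, 1, 1, 1, 1, 1, 0]
    sage: trivial_character(7, Integers(3))(1).parent()
    Ring of integers modulo 3
    sage: kronecker_character(5)
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1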
The characters in a group can be listed, indexed (negative indices work) and
counted::

    sage: DirichletGroup(5).list()
    [Dirichlet character modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4]
    sage: len(DirichletGroup(20, GF(5)))
    8
    sage: len(DirichletGroup(20, GF(2)))
    1
    sage: len(DirichletGroup(20, GF(3)))
    4
Internally, a group stores ``_module``, a free module over `\ZZ/e\ZZ` (with
`e` the order of ``zeta``) whose rank is the number of unit generators, and
each character is represented by its exponent vector in that module. The
powers of ``zeta`` are cached in ``_zeta_powers``, and ``_zeta_dlog`` is the
dictionary used to compute discrete logarithms in the value group, computed
as ``{z: i for i, z in enumerate(self._zeta_powers)}``::

    sage: DirichletGroup(12)._module
    Vector space of dimension 2 over Ring of integers modulo 2
    sage: DirichletGroup(5)._zeta_dlog
    {-1: 2, -zeta4: 3, zeta4: 1, 1: 0}

The constructor of :class:`DirichletCharacter` sets the cache of
:meth:`element` or of :meth:`values_on_gens`; at least one of these caches has
to be set for the other method to work properly. (Older pickles stored
explicit ``__element`` and ``__values_on_gens`` attributes; ``__setstate__``
migrates them into these caches when it encounters them.)
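The exponent vectors line up with ``values_on_gens()`` through the powers of
``zeta`` (recovered doctests plus a hand check on the last line)::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.element()
    (2, 0)
    sage: b.element()
    (0, 1)
    sage: a.values_on_gens()
    (-1, 1)
    sage: G.zeta()^2
    -1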
``values()`` avoids a discrete logarithm per entry. The recovered strategy:
initialize ``result_list = [R.zero()] * mod``, look up ``gens =
G.unit_gens()``, ``orders = G.integers_mod().unit_group().gens_orders()`` and
``R_values = G._zeta_powers``, then walk through all products of the unit
generators while maintaining the current residue ``n`` and the exponent
vector; at each step the loop records ``result_list[n] = R_values[value]``,
increases the exponent vector by 1, and updates ``n`` and ``value``
accordingly, resetting any exponent that reaches its order. Residues that are
not units keep the initial value zero.
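A simplified re-statement of that strategy (an illustration only; the
recovered implementation avoids calling ``chi(n)`` inside the loop)::

    sage: def character_values(chi):
    ....:     G = chi.parent(); N = G.modulus(); R = G.base_ring()
    ....:     result = [R.zero()] * N          # non-units keep the value 0
    ....:     for n in range(N):
    ....:         if gcd(n, N) == 1:
    ....:             result[n] = chi(n)       # the real code walks exponent vectors instead
    ....:     return result
    sage: chi = DirichletGroup(4).0
    sage: character_values(chi) == chi.values()
    True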
``conductor()`` is computed from the decomposition into prime-power
components: the trivial character has conductor 1. When `p` is odd and the
component `x` is nontrivial, the conductor is the smallest `p^r` such that the
order of `x` divides `\varphi(p^r)`, so `r` equals `v_p(\mathrm{ord}(x)) + 1`
and ``cond = p**(valuation(self.order(), p) + 1)``. For `p = 2` with modulus
divisible by 8, the conductor is doubled once more when the character is
nontrivial on the second local generator
(``self.values_on_gens()[1].multiplicative_order() != 1``). For a general
modulus, ``factor(self.modulus())`` is consulted and the conductors of the
components are multiplied.
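Recovered examples::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.conductor()
    4
    sage: b.conductor()
    5
    sage: (a*b).conductor()
    20
    sage: a.primitive_character().modulus()
    4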
    @cached_method
    def decomposition(self):
        r"""
        Return the decomposition of self as a product of Dirichlet
        characters of prime power modulus, where the prime powers
        exactly divide the modulus of this character.

        EXAMPLES::

            sage: G.<a,b> = DirichletGroup(20)
            sage: c = a*b
            sage: d = c.decomposition(); d
            [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1,
             Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
            sage: d[0].parent()
            Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2
            sage: d[1].parent()
            Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2

        We cannot multiply the factors directly, since coercion into
        the other parent fails in both directions, even though the
        base rings are equal::

            sage: d[0]*d[1] == c
            Traceback (most recent call last):
            ...
            TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'

        Conductors that are divisible by various powers of 2 present
        some problems, as the multiplicative group modulo `2^k` is
        trivial for `k = 1` and non-cyclic for `k \ge 3`::

            sage: (DirichletGroup(72).0).decomposition()
            [Dirichlet character modulo 8 of conductor 4 mapping 7 |--> -1, 5 |--> 1,
             Dirichlet character modulo 9 of conductor 1 mapping 2 |--> 1]
        """
        D = self.parent().decomposition()
        vals = [[z] for z in self.values_on_gens()]
        if self.modulus() % 8 == 0:
            # two factors at 2
            vals[0].append(vals[1][0])
            del vals[1]
        elif self.modulus() % 4 == 2:
            # if the modulus is 2 mod 4, there will be no factor at 2
            vals = [1] + vals
        return [D[i](vals[i]) for i in range(len(D))]
    def extend(self, M):
        """
        Return the extension of this character to a Dirichlet
        character modulo the multiple M of the modulus.

        EXAMPLES::

            sage: G.<a,b> = DirichletGroup(20)
            sage: H.<c> = DirichletGroup(4)
            sage: c.extend(20)
            Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
            sage: a
            Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
            sage: c.extend(20) == a
            True
        """
        if M % self.modulus() != 0:
            raise ArithmeticError("M(=%s) must be a multiple of the modulus(=%s)" % (M, self.modulus()))
        H = DirichletGroup(M, self.base_ring())
        return H(self)

    def restrict(self, M):
        """
        Return the restriction of this character to a Dirichlet
        character modulo the divisor M of the modulus, which must
        also be a multiple of the conductor of this character.

        EXAMPLES::

            sage: e = DirichletGroup(100).0
            sage: e.modulus()
            100
            sage: e.conductor()
            4
            sage: e.restrict(20)
            Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
            sage: e.restrict(4)
            Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
            sage: e.restrict(50)
            Traceback (most recent call last):
            ...
            ValueError: conductor(=4) must divide M(=50)
        """
        M = int(M)
        if self.modulus() % M != 0:
            raise ValueError("M(=%s) must divide the modulus(=%s)" % (M, self.modulus()))
        if M % self.conductor() != 0:
            raise ValueError("conductor(=%s) must divide M(=%s)" % (self.conductor(), M))
        H = DirichletGroup(M, self.base_ring())
        return H(self)
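    # Sketch: ``extend`` and ``restrict`` are inverse on characters whose
    # conductor divides the smaller modulus, so extending a character
    # and restricting back recovers the original::
    #
    #     sage: e = DirichletGroup(4).0
    #     sage: e.extend(20).restrict(4) == e
    #     True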
    @cached_method
    def is_even(self):
        r"""
        Return ``True`` if and only if `\varepsilon(-1) = 1`.

        EXAMPLES::

            sage: G = DirichletGroup(13)
            sage: e = G.0
            sage: e.is_even()
            False
            sage: e(-1)
            -1
            sage: [e.is_even() for e in G]
            [True, False, True, False, True, False, True, False, True, False, True, False]

            sage: G = DirichletGroup(13, CC)
            sage: e = G.0
            sage: e.is_even()
            False
            sage: e(-1)
            -1.000000...
            sage: [e.is_even() for e in G]
            [True, False, True, False, True, False, True, False, True, False, True, False]

            sage: G = DirichletGroup(100000, CC)
            sage: G.1.is_even()
            True

        Note that ``is_even`` need not be the negation of ``is_odd``,
        e.g., in characteristic 2::

            sage: e = DirichletGroup(13, GF(4,'a')).0
            sage: e.is_even()
            True
            sage: e.is_odd()
            True
        """
        R = self.base_ring()
        # self(-1) is either +1 or -1
        if not R.is_exact():
            return abs(self(-1) - R(1)) < 0.5
        return self(-1) == R(1)

    @cached_method
    def is_odd(self):
        r"""
        Return ``True`` if and only if `\varepsilon(-1) = -1`.

        EXAMPLES::

            sage: G = DirichletGroup(100000, CC)
            sage: G.0.is_odd()
            True
            sage: G = DirichletGroup(13)
            sage: e = G.0
            sage: e.is_odd()
            True
            sage: [e.is_odd() for e in G]
            [False, True, False, True, False, True, False, True, False, True, False, True]
        """
        R = self.base_ring()
        # self(-1) is either +1 or -1
        if not R.is_exact():
            return abs(self(-1) - R(-1)) < 0.5
        return self(-1) == R(-1)
    def gauss_sum(self, a=1):
        r"""
        Return a Gauss sum associated to this Dirichlet character.

        The Gauss sum associated to `\chi` is

        .. MATH::

            g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},

        where `m` is the modulus of `\chi` and `\zeta` is a primitive
        `m^{th}` root of unity.

        FACTS: If the modulus is a prime `p` and the character is
        nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.

        CACHING: Computed Gauss sums are *not* cached with this
        character.

        EXAMPLES::

            sage: G = DirichletGroup(3)
            sage: e = G([-1])
            sage: e.gauss_sum(1)
            2*zeta6 - 1
            sage: e.gauss_sum(2)
            -2*zeta6 + 1
            sage: norm(e.gauss_sum())
            3

        ::

            sage: G = DirichletGroup(13)
            sage: e = G.0
            sage: factor(norm(e.gauss_sum()))
            13^24

        The field of algebraic numbers is supported (:trac:`19056`)::

            sage: G = DirichletGroup(7, QQbar)
            sage: G[1].gauss_sum()
            -2.440133358345538? + 1.022618791871794?*I
        """
        G = self.parent()
        K = G.base_ring()
        chi = self
        m = G.modulus()
        if is_ComplexField(K):
            return self.gauss_sum_numerical(a=a)
        elif is_AlgebraicField(K):
            L = K
            zeta = L.zeta(m)
        elif number_field.is_CyclotomicField(K) or is_RationalField(K):
            chi = chi.minimize_base_ring()
            n = lcm(m, G.zeta_order())
            L = rings.CyclotomicField(n)
            zeta = L.gen(0) ** (n // m)
        else:
            raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
        zeta = zeta ** a
        g = L(chi(0))
        z = L.one()
        for c in chi.values()[1:]:
            z *= zeta
            g += L(c)*z
        return g
    def gauss_sum_numerical(self, prec=53, a=1):
        r"""
        Return a Gauss sum associated to this Dirichlet character as
        an approximate complex number with prec bits of precision.

        INPUT:

        - ``prec`` -- integer (default: 53), *bits* of precision

        - ``a`` -- integer, as for :meth:`gauss_sum`.

        EXAMPLES::

            sage: G = DirichletGroup(13)
            sage: H = DirichletGroup(13, CC)
            sage: e = G.0
            sage: f = H.0
            sage: e.gauss_sum_numerical()
            -3.07497205... + 1.8826966926...*I
            sage: f.gauss_sum_numerical()
            -3.07497205... + 1.8826966926...*I
            sage: abs(e.gauss_sum_numerical())
            3.60555127546...
            sage: abs(f.gauss_sum_numerical())
            3.60555127546...
            sage: sqrt(13.0)
            3.60555127546399
        """
        G = self.parent()
        K = G.base_ring()
        if is_ComplexField(K):
            phi = lambda t: t
            CC = K
        elif is_AlgebraicField(K):
            from sage.rings.complex_mpfr import ComplexField
            CC = ComplexField(prec)
            phi = CC.coerce_map_from(K)
        elif number_field.is_CyclotomicField(K) or is_RationalField(K):
            phi = K.complex_embedding(prec)
            CC = phi.codomain()
        else:
            raise NotImplementedError("Gauss sums only currently implemented when the base ring is a cyclotomic field, QQ, QQbar, or a complex field")
        zeta = CC.zeta(G.modulus()) ** a
        g = phi(self(0))
        z = CC.one()
        for c in self.values()[1:]:
            z *= zeta
            g += phi(c)*z
        return g
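    # Numerical sketch of the FACTS above: every nontrivial character
    # modulo the prime 13 has a Gauss sum of absolute value sqrt(13)::
    #
    #     sage: all(abs(abs(e.gauss_sum_numerical()) - sqrt(13.0)) < 1e-10
    #     ....:     for e in DirichletGroup(13) if not e.is_trivial())
    #     True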
    def jacobi_sum(self, char, check=True):
        r"""
        Return the Jacobi sum associated to these Dirichlet characters
        (i.e., J(self, char)).

        This is defined as

        .. MATH::

            J(\chi, \psi) = \sum_{a \in \ZZ/N\ZZ} \chi(a) \psi(1-a)

        where `\chi` and `\psi` are both characters modulo `N`.

        EXAMPLES::

            sage: D = DirichletGroup(13)
            sage: e = D.0
            sage: f = D[-2]
            sage: e.jacobi_sum(f)
            3*zeta12^2 + 2*zeta12 - 3
            sage: f.jacobi_sum(e)
            3*zeta12^2 + 2*zeta12 - 3

        Both characters must be from the same Dirichlet group::

            sage: DP = DirichletGroup(7)
            sage: f = DP.0
            sage: e.jacobi_sum(f)
            Traceback (most recent call last):
            ...
            NotImplementedError: Characters must be from the same Dirichlet Group.

        Let's check that trivial sums are being calculated correctly::

            sage: N = 13
            sage: D = DirichletGroup(N)
            sage: g = D(1)
            sage: g.jacobi_sum(g)
            11
            sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
            11

        And sums where exactly one character is nontrivial
        (see :trac:`6393`)::

            sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
            sage: Y.jacobi_sum(Z)
            -1
            sage: Z.jacobi_sum(Y)
            -1
        """
        if check:
            if self.parent() != char.parent():
                raise NotImplementedError("Characters must be from the same Dirichlet Group.")
        return sum([self(x) * char(1-x) for x in rings.IntegerModRing(self.modulus())])
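    # Sketch: the Jacobi sum can be checked directly against its
    # defining sum, since characters send arguments with gcd(a, N) > 1
    # to zero::
    #
    #     sage: D = DirichletGroup(13); e = D.0; f = D[-2]
    #     sage: e.jacobi_sum(f) == sum(e(a)*f(1 - a) for a in range(13))
    #     True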
    def kloosterman_sum(self, a=1, b=0):
        r"""
        Return the "twisted" Kloosterman sum associated to this
        Dirichlet character.

        This includes Gauss sums, classical Kloosterman sums, Salié
        sums, etc.  The Kloosterman sum associated to `\chi` and the
        integers `a`, `b` is

        .. MATH::

            K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times}
            \chi(r)\,\zeta^{ar + br^{-1}},

        where `m` is the modulus of `\chi` and `\zeta` is a primitive
        `m`-th root of unity.  This reduces to the Gauss sum if `b=0`.

        This method performs an exact calculation and returns an
        element of a suitable cyclotomic field; see also
        :meth:`.kloosterman_sum_numerical`, which gives an inexact
        answer (but is generally much quicker).

        CACHING: Computed Kloosterman sums are *not* cached with this
        character.

        EXAMPLES::

            sage: G = DirichletGroup(3)
            sage: e = G([-1])
            sage: e.kloosterman_sum(3,5)
            -2*zeta6 + 1
            sage: G = DirichletGroup(20)
            sage: e = G([1 for u in G.unit_gens()])
            sage: e.kloosterman_sum(7,17)
            -2*zeta20^6 + 2*zeta20^4 + 4

        TESTS::

            sage: G = DirichletGroup(20, UniversalCyclotomicField())
            sage: e = G.0
            sage: e.kloosterman_sum(3,5)
            Traceback (most recent call last):
            ...
            NotImplementedError: Kloosterman sums not implemented over this ring
        """
        G = self.parent()
        zo = G.zeta_order()
        m = G.modulus()
        g = 0
        L = rings.CyclotomicField(m.lcm(zo))
        zeta = L.gen(0)
        try:
            self(1) * zeta**(a+b)
        except TypeError:
            raise NotImplementedError('Kloosterman sums not implemented '
                                      'over this ring')
        n = zeta.multiplicative_order()
        zeta = zeta**(n // m)
        for c in m.coprime_integers(m):
            e = rings.Mod(c, m)
            z = zeta ** int(a*e + b*(e**(-1)))
            g += self(c)*z
        return g
    def kloosterman_sum_numerical(self, prec=53, a=1, b=0):
        r"""
        Return the Kloosterman sum associated to this Dirichlet
        character as an approximate complex number with prec bits of
        precision.

        See also :meth:`.kloosterman_sum`, which calculates the sum
        exactly (but is generally slower).

        INPUT:

        - ``prec`` -- integer (default: 53), *bits* of precision

        - ``a`` -- integer, as for :meth:`.kloosterman_sum`

        - ``b`` -- integer, as for :meth:`.kloosterman_sum`.

        EXAMPLES::

            sage: G = DirichletGroup(3)
            sage: e = G.0

        The real component of the numerical value of e is near zero::

            sage: v = e.kloosterman_sum_numerical()
            sage: v.real() < 1.0e-15
            True
            sage: v.imag()
            1.73205080756888
            sage: G = DirichletGroup(20)
            sage: e = G.1
            sage: e.kloosterman_sum_numerical(53,3,11)
            3.80422606518061 - 3.80422606518061*I
        """
        G = self.parent()
        K = G.base_ring()
        if not (number_field.is_CyclotomicField(K) or is_RationalField(K)):
            raise NotImplementedError("Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.")
        phi = K.complex_embedding(prec)
        CC = phi.codomain()
        g = 0
        m = G.modulus()
        zeta = CC.zeta(m)
        for c in m.coprime_integers(m):
            e = rings.Mod(c, m)
            z = zeta ** int(a*e + b*(e**(-1)))
            g += phi(self(c))*z
        return g
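    # Sketch: taking the trivial character recovers the classical
    # (untwisted) Kloosterman sum, which is a real number; here we
    # check that the imaginary part is numerically zero::
    #
    #     sage: chi = trivial_character(11)
    #     sage: abs(chi.kloosterman_sum_numerical(a=1, b=1).imag()) < 1e-10
    #     True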
MATH:: \\sum_{a=1}^N \\frac{\\varepsilon(a) t", "if e.modulus() % 2 == 0: if e.modulus() % 4", "'_DirichletCharacter__values_on_gens' values_on_gens = None state_dict = state[1] if values_on_gens_key in", "In both cases, the orders of the elements must divide", "[7, 13, 17]], [[2, 2, 3]~, Vecsmall([3, 3, 1])], [[8,", "0, 1, -1, 0, 1, -1] sage: e = DirichletGroup(21,", "to the :func:`bernoulli` function if this is called OUTPUT: Let", "bits of precision \"\"\" if zeta is None and self._zeta", "return [R.one()] elif mod == 2: return [R.zero(), R.one()] result_list", "t = trivial_character(7) sage: [t(x) for x in [0..20]] [0,", "Dirichlet character modulo 13 of conductor 13 mapping 2 |-->", "sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k) else: raise ValueError(\"algorithm", "= int(z.additive_order()) Auts = set([m % o for m in", "character modulo 24 of conductor 24 mapping 7 |--> 1,", "in range(0,r)] return Auts def galois_orbits(self, v=None, reps_only=False, sort=True, check=True):", "d <= 0: raise ValueError(\"d must be positive\") G =", "by a factor of 10 or more in many cases,", "``v`` - (optional) list of elements of self - ``reps_only``", "call last): ... NotImplementedError: Automorphisms for finite non-field base rings", "p = F[0][0] # When p is odd, and x", "False sage: k = DirichletGroup(7)([-1]) sage: k == e False", "\"\"\" Return self raised to the power of n EXAMPLES::", "s @cached_method def decomposition(self): r\"\"\" Returns the Dirichlet groups of", "present this is only implemented if the base ring has", "True sage: v.imag() 1.73205080756888 sage: G = DirichletGroup(20) sage: e", "by :meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`. - vector over `\\ZZ/e\\ZZ`, where `e` is the", "+ 1.8826966926...*I sage: f.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage: abs(e.gauss_sum_numerical()) 3.60555127546...", "of the characters, either with or without specifying a root", "in orbit: seen_so_far.add(tuple(z.element())) G = Sequence(G, cr=True) if sort: G.sort()", "(e^12).minimize_base_ring().base_ring() Rational Field TESTS: Check that :trac:`18479` is fixed:: sage:", "of one of these methods needs to be set for", "of v into self. The Galois group is the absolute", "has absolute value `\\sqrt{p}`. CACHING: Computed Gauss sums are *not*", "zeta4) \"\"\" g = [] ord = self.zeta_order() M =", "- 5.16608739123418e-18*I \"\"\" if algorithm is None: algorithm = 'pari'", "groups with the same parameters yields the same object:: sage:", "Ring of integers modulo 15) must be an integral domain", "p**r such that # Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1). #", "1, 0, 1, 0, 0, 0, 1, 0, 1, 0,", "raise ValueError(\"d must be positive\") G = DirichletGroup(d, rings.RationalField()) return", "< orders[i]: break exponents[i] = 0 i += 1 @cached_method(do_pickle=True)", "char.parent(): raise NotImplementedError(\"Characters must be from the same Dirichlet Group.\")", "for p, r \\ in factor(self.modulus())], cr=True, universe = cat.Objects())", "orbits only defined if base ring is an integral domain\")", "We create the group of Dirichlet character mod 20 with", "of Group of Dirichlet characters modulo 13 with values in", "elements of self. This is the same as len(self). 
    def maximize_base_ring(self):
        r"""
        Let

        .. MATH::

            \chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_m)

        be a Dirichlet character.  This function returns an equal
        Dirichlet character

        .. MATH::

            \chi : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)

        where `n` is the least common multiple of `m` and the
        exponent of `(\ZZ/N\ZZ)^*`.

        EXAMPLES::

            sage: G.<a,b> = DirichletGroup(20,QQ)
            sage: b.maximize_base_ring()
            Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> -1
            sage: b.maximize_base_ring().base_ring()
            Cyclotomic Field of order 4 and degree 2
            sage: DirichletGroup(20).base_ring()
            Cyclotomic Field of order 4 and degree 2
        """
        g = rings.IntegerModRing(self.modulus()).unit_group_exponent()
        if g == 1:
            g = 2
        z = self.base_ring().zeta()
        n = z.multiplicative_order()
        m = lcm(g, n)
        if n == m:
            return self
        K = rings.CyclotomicField(m)
        return self.change_ring(K)

    def minimize_base_ring(self):
        r"""
        Return a Dirichlet character that equals this one, but over as
        small a subfield (or subring) of the base ring as possible.

        .. note::

            This function is currently only implemented when the base
            ring is a number field; it's the identity function in
            characteristic p.

        EXAMPLES::

            sage: G = DirichletGroup(13)
            sage: e = DirichletGroup(13).0
            sage: e.minimize_base_ring().base_ring()
            Cyclotomic Field of order 12 and degree 4
            sage: (e^2).minimize_base_ring().base_ring()
            Cyclotomic Field of order 6 and degree 2
            sage: (e^3).minimize_base_ring().base_ring()
            Cyclotomic Field of order 4 and degree 2
            sage: (e^12).minimize_base_ring().base_ring()
            Rational Field
        """
        R = self.base_ring()
        if R.is_prime_field():
            return self
        p = R.characteristic()

        if p:
            K = rings.IntegerModRing(p)
        elif self.order() <= 2:
            K = rings.QQ
        elif (isinstance(R, number_field.NumberField_generic)
              and euler_phi(self.order()) < R.absolute_degree()):
            K = rings.CyclotomicField(self.order())
        else:
            return self

        try:
            return self.change_ring(K)
        except (TypeError, ValueError, ArithmeticError):
            return self
    def modulus(self):
        """
        The modulus of this character.

        EXAMPLES::

            sage: e = DirichletGroup(100, QQ).0
            sage: e.modulus()
            100
            sage: e.conductor()
            4
        """
        return self.parent().modulus()

    def level(self):
        """
        Synonym for modulus.

        EXAMPLES::

            sage: e = DirichletGroup(100, QQ).0
            sage: e.level()
            100
        """
        return self.modulus()

    @cached_method
    def multiplicative_order(self):
        """
        The order of this character.

        EXAMPLES::

            sage: e = DirichletGroup(100).1
            sage: e.order()    # same as multiplicative_order, since group is multiplicative
            20
            sage: e.multiplicative_order()
            20
            sage: e = DirichletGroup(100).0
            sage: e.order()
            2
            sage: e.multiplicative_order()
            2
        """
        if self.parent().zeta.is_in_cache():
            return self.element().additive_order()
        return lcm([z.multiplicative_order() for z in self.values_on_gens()])
    def primitive_character(self):
        """
        Return the primitive character associated to self.

        EXAMPLES::

            sage: e = DirichletGroup(100).0
            sage: e.conductor()
            4
            sage: f = e.primitive_character(); f
            Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
            sage: f.modulus()
            4
        """
        return self.restrict(self.conductor())
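    # Sketch: the order of a character is the lcm of the multiplicative
    # orders of its values on the unit generators::
    #
    #     sage: e = DirichletGroup(100).1
    #     sage: e.multiplicative_order() == lcm([v.multiplicative_order()
    #     ....:     for v in e.values_on_gens()])
    #     True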
    @cached_method
    def values(self):
        """
        Return a list of the values of this character on each integer
        between 0 and the modulus.

        EXAMPLES::

            sage: e = DirichletGroup(20)(1)
            sage: e.values()
            [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
            sage: e = DirichletGroup(20).gen(0)
            sage: e.values()
            [0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
            sage: e = DirichletGroup(20).gen(1)
            sage: e.values()
            [0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]
        """
        G = self.parent()
        R = G.base_ring()

        mod = self.parent()._modulus
        if mod == 1:
            return [R.one()]
        elif mod == 2:
            return [R.zero(), R.one()]

        result_list = [R.zero()] * mod
        gens = G.unit_gens()
        orders = G.integers_mod().unit_group().gens_orders()

        R_values = G._zeta_powers
        val_on_gen = self.element()

        exponents = [0] * len(orders)
        n = G.integers_mod().one()
        value = val_on_gen.base_ring().zero()

        while True:
            # record character value on n
            result_list[n] = R_values[value]
            # iterate: increase the exponent vector by 1,
            # and increase n and value accordingly
            i = 0
            while True:
                try:
                    exponents[i] += 1
                except IndexError:  # Done!
                    return result_list
                value += val_on_gen[i]
                n *= gens[i]
                if exponents[i] < orders[i]:
                    break
                exponents[i] = 0
                i += 1
    @cached_method(do_pickle=True)
    def values_on_gens(self):
        r"""
        Return a tuple of the values of ``self`` on the standard
        generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus.

        EXAMPLES::

            sage: e = DirichletGroup(16)([-1, 1])
            sage: e.values_on_gens()
            (-1, 1)

        .. NOTE::

            The constructor of :class:`DirichletCharacter` sets the
            cache of :meth:`element` or of :meth:`values_on_gens`.  The
            cache of one of these methods needs to be set for the other
            method to work properly, so these caches have to be stored
            when pickling an instance of :class:`DirichletCharacter`.
        """
        pows = self.parent()._zeta_powers
        return tuple([pows[i] for i in self.element()])

    @cached_method(do_pickle=True)
    def element(self):
        r"""
        Return the underlying `\ZZ/n\ZZ`-module vector of exponents.

        .. warning::

            Please do not change the entries of the returned vector;
            this vector is mutable *only* because immutable vectors
            are not implemented yet.

        EXAMPLES::

            sage: G.<a,b> = DirichletGroup(20)
            sage: a.element()
            (2, 0)
            sage: b.element()
            (0, 1)
        """
        P = self.parent()
        M = P._module
        if is_ComplexField(P.base_ring()):
            zeta = P.zeta()
            zeta_argument = zeta.argument()
            v = M([int(round(x.argument() / zeta_argument))
                   for x in self.values_on_gens()])
        else:
            dlog = P._zeta_dlog
            v = M([dlog[x] for x in self.values_on_gens()])
        v.set_immutable()
        return v

    def __setstate__(self, state):
        r"""
        Restore a pickled element from ``state``.

        TESTS::

            sage: e = DirichletGroup(16)([-1, 1])
            sage: loads(dumps(e)) == e
            True
        """
        # values_on_gens() used an explicit cache __values_on_gens in
        # the past; we need to set the cache of values_on_gens() from
        # that if we encounter it in a pickle
        values_on_gens_key = '_DirichletCharacter__values_on_gens'
        values_on_gens = None
        state_dict = state[1]
        if values_on_gens_key in state_dict:
            values_on_gens = state_dict[values_on_gens_key]
            del state_dict[values_on_gens_key]

        # element() used an explicit cache __element in the past;
        # we need to set the cache of element() from that if we
        # encounter it in a pickle
        element_key = '_DirichletCharacter__element'
        element = None
        if element_key in state_dict:
            element = state_dict[element_key]
            del state_dict[element_key]

        super(DirichletCharacter, self).__setstate__(state)
        if values_on_gens is not None:
            self.values_on_gens.set_cache(values_on_gens)
        if element is not None:
            self.element.set_cache(element)
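    # Sketch: the precomputed table returned by ``values`` agrees with
    # evaluating the character at every residue, with non-units sent
    # to zero::
    #
    #     sage: e = DirichletGroup(20).gen(0)
    #     sage: e.values() == [e(n) for n in range(20)]
    #     True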
    def change_ring(self, R):
        """
        Return the base extension of ``self`` to ``R``.

        INPUT:

        - ``R`` -- either a ring admitting a conversion map from the
          base ring of ``self``, or a ring homomorphism with the base
          ring of ``self`` as its domain

        EXAMPLES::

            sage: e = DirichletGroup(5).0
            sage: e.change_ring(QQ)
            Traceback (most recent call last):
            ...
            TypeError: Unable to coerce zeta4 to a rational

        We test the case where `R` is a map (:trac:`18072`)::

            sage: K.<i> = QuadraticField(-1)
            sage: chi = DirichletGroup(5, K)[1]
            sage: f = K.complex_embeddings()[0]
            sage: psi = chi.change_ring(f)
        """
        if self.base_ring() is R:
            return self
        G = self.parent().change_ring(R)
        return G.element_class(G, [R(x) for x in self.values_on_gens()])
EXAMPLES:: sage:", "f = e.primitive_character(); f Dirichlet character modulo 4 of conductor", "conductor 1 mapping 2 |--> 1] \"\"\" D = self.parent().decomposition()", "o = int(z.additive_order()) Auts = set([m % o for m", "it will be recomputed as the order # of R(zeta)", "in range(r): if i != 0: s += r',\\ '", "# divisibility holds equals Valuation(Order(x),p)+1. cond = p**(valuation(self.order(),p) + 1)", "@property def _zeta_powers(self): \"\"\" Return a list of powers of", "any(u**v != 1 for u, v in zip(x, orders)): raise", "of the absolute Galois group of the prime subfield of", "of unity for ``parent``. In both cases, the orders of", "(see :trac:`17283`):: sage: k.<i> = CyclotomicField(4) sage: G = DirichletGroup(192)", "== e True :: sage: G, x = DirichletGroup(35).objgens() sage:", "prec=53, a=1, b=0): r\"\"\" Return the Kloosterman sum associated to", "0, 0, 0, zeta4, 0, -1, 0, 1, 0, -zeta4,", "in v: z = x.element() e = tuple(z) # change", "\"\"\" ord = rings.Integer(1) for g in self.gens(): ord *=", "free_module.FreeModule(rings.IntegerModRing(self.zeta_order()), len(self.unit_gens())) @property def _zeta_powers(self): \"\"\" Return a list of", "1.8826966926...*I sage: f.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage: abs(e.gauss_sum_numerical()) 3.60555127546... sage:", "the generator minus 1, inclusive. EXAMPLES:: sage: DirichletGroup(37).random_element() Dirichlet character", "of conductor 20 mapping 11 |--> -1, 17 |--> zeta4", "zeta_argument)) for x in self.values_on_gens()]) else: dlog = P._zeta_dlog v", "= DirichletGroup(5); X = G.list(); Y = X[0]; Z =", "values in Finite Field of size 5, Group of Dirichlet", "function in characteristic p. EXAMPLES:: sage: G = DirichletGroup(13) sage:", "modulus(self): \"\"\" Returns the modulus of self. EXAMPLES:: sage: G", "if check: v = [self(x) for x in v] G", "root of unity can change:: sage: H.zeta() zeta6 This method", "EXAMPLES:: sage: DirichletGroup(37).random_element() Dirichlet character modulo 37 of conductor 37", "H Group of Dirichlet characters modulo 7 with values in", "* len(orders) n = G.integers_mod().one() value = val_on_gen.base_ring().zero() while True:", "return Sequence([DirichletGroup(p**r,R) for p, r \\ in factor(self.modulus())], cr=True, universe", ": (\\ZZ/N\\ZZ)^* \\to \\QQ(\\zeta_m) where `m` is the least common", "of different moduli compare as unequal, even if they define", "range(i, p-1)] sage: for s in all_jacobi_sums: ....: print(s) ((1,),", "cache: try: self.__bernoulli except AttributeError: self.__bernoulli = {} if k", "DirichletGroup(192) sage: G([i, -1, -1]) Traceback (most recent call last):", "DirichletGroup(20).decomposition() [ Group of Dirichlet characters modulo 4 with values", "DirichletGroup(5, QQ).0 sage: f = DirichletGroup(5,CyclotomicField(4)).0 sage: e*f Dirichlet character", "this, since e.g., unit gens mod 11 are not units", "- (optional, default: True) whether or not to explicitly coerce", "new parent if zeta is not None: zeta = R(zeta)", "fixed:: sage: f = Newforms(Gamma1(25), names='a')[1] sage: eps = f.character()", "in \" % (self._zeta_order, self._zeta) s += str(self.base_ring()) return s", "be nonzero\") D = fundamental_discriminant(d) G = DirichletGroup(abs(D), rings.RationalField()) return", "G = DirichletGroup.create_object(None, k); G Group of Dirichlet characters modulo", "be renamed. 
class DirichletGroupFactory(UniqueFactory):
    r"""
    Construct a group of Dirichlet characters modulo `N`.

    INPUT:

    - ``N`` -- positive integer

    - ``base_ring`` -- commutative ring; the value ring for the
      characters in this group (default: the cyclotomic field of
      order the exponent of `(\ZZ/N\ZZ)^*`)

    - ``zeta`` -- (optional) root of unity in ``base_ring``

    - ``zeta_order`` -- (optional) positive integer; this must be the
      order of ``zeta`` if both are specified

    - ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)``
      notation works)

    - ``integral`` -- boolean (default: ``False``); whether to replace
      the default cyclotomic field by its ring of integers (ignored if
      ``base_ring`` is given)

    OUTPUT:

    The group of Dirichlet characters modulo `N` with values in a
    subgroup `V` of the multiplicative group `R^*` of ``base_ring``.
    This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with
    pointwise multiplication.  The group `V` is determined as follows:

    - If both ``zeta`` and ``zeta_order`` are omitted, then `V` is
      taken to be `R^*`, or equivalently its group of roots of unity.

    - If ``zeta`` is specified, then `V` is taken to be the cyclic
      subgroup of `R^*` generated by ``zeta``.  If ``zeta_order`` is
      also given, it must be the multiplicative order of ``zeta``;
      this is useful if the base ring is not exact or if the order of
      ``zeta`` is very large.

    - If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
      taken to be the group of roots of unity of order dividing
      ``zeta_order`` in `R`.  In this case, `R` must be an integral
      domain (so `V` is cyclic), and a generator ``zeta`` of `V` is
      computed; an error is raised if no such ``zeta`` can be found.

    EXAMPLES:

    The default base ring is a cyclotomic field of order the exponent
    of `(\ZZ/N\ZZ)^*`::

        sage: DirichletGroup(20)
        Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2

    We create the group of Dirichlet characters mod 20 with values in
    the rational numbers::

        sage: G = DirichletGroup(20, QQ); G
        Group of Dirichlet characters modulo 20 with values in Rational Field
        sage: G.order()
        4
        sage: G.base_ring()
        Rational Field

    Calling the factory twice with the same parameters yields the same
    object::

        sage: DirichletGroup(60) is DirichletGroup(60)
        True
    """
    def create_key(self, N, base_ring=None, zeta=None, zeta_order=None,
                   names=None, integral=False):
        """
        Create a key that uniquely determines a Dirichlet group.

        TESTS::

            sage: DirichletGroup.create_key(60)
            (Cyclotomic Field of order 4 and degree 2, 60, None, None)

        An example to illustrate that ``base_ring`` is a part of the
        key::

            sage: k = DirichletGroup.create_key(2, base_ring=QQ); k
            (Rational Field, 2, None, None)
            sage: l = DirichletGroup.create_key(2, base_ring=CC); l
            (Complex Field with 53 bits of precision, 2, None, None)
            sage: k == l
            False
        """
        modulus = rings.Integer(N)
        if modulus <= 0:
            raise ValueError("modulus should be positive")

        if base_ring is None:
            e = rings.IntegerModRing(modulus).unit_group_exponent()
            base_ring = rings.CyclotomicField(e)
            if integral:
                base_ring = base_ring.ring_of_integers()

        if not is_Ring(base_ring):
            raise TypeError("base_ring (= %s) must be a ring" % base_ring)

        if zeta is not None:
            zeta = base_ring(zeta)
            if zeta_order is None:
                zeta_order = zeta.multiplicative_order()
        elif zeta_order is not None:
            if not base_ring.is_integral_domain():
                raise ValueError("base ring (= %s) must be an integral domain if only zeta_order is specified" % base_ring)
            zeta_order = rings.Integer(zeta_order)
            zeta = base_ring.zeta(zeta_order)

        return (base_ring, modulus, zeta, zeta_order)

    def create_object(self, version, key, **extra_args):
        """
        Create the object from the key (extra arguments are ignored).

        TESTS::

            sage: K = CyclotomicField(4)
            sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))
            Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2
        """
        base_ring, modulus, zeta, zeta_order = key
        return DirichletGroup_class(base_ring, modulus, zeta, zeta_order)

DirichletGroup = DirichletGroupFactory("DirichletGroup")
class DirichletGroup_class(WithEqualityById, Parent):
    """
    Group of Dirichlet characters modulo `N` with values in a ring `R`.
    """

    Element = DirichletCharacter

    def __init__(self, base_ring, modulus, zeta, zeta_order):
        """
        Create a Dirichlet group.

        Not to be called directly (use the factory function
        ``DirichletGroup``).

        The ``DirichletGroup`` factory ensures that either both
        ``zeta`` and ``zeta_order`` are specified, or that both are
        ``None``.  In the former case, it also ensures that ``zeta``
        is an element of ``base_ring`` and that ``zeta_order`` is an
        element of ``ZZ``.

        TESTS::

            sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2)  # indirect doctest
            sage: TestSuite(G).run()
            sage: G.base()  # check that Parent.__init__ has been called
            Ring of integers modulo 9

            sage: DirichletGroup(13) == DirichletGroup(13)
            True
            sage: DirichletGroup(13) == DirichletGroup(13, QQ)
            False
        """
        from sage.categories.groups import Groups
        category = Groups().Commutative()
        if base_ring.is_integral_domain() or base_ring.is_finite():
            # The group of n-th roots of unity in the base ring is
            # finite, and hence this Dirichlet group is finite too.
            # In particular, it is finitely generated; the added
            # FinitelyGenerated() here means that the group has a
            # distinguished set of generators.
            category = category.Finite().FinitelyGenerated()
        Parent.__init__(self, base_ring, category=category)
        self._zeta = zeta
        self._zeta_order = zeta_order
        self._modulus = modulus
        self._integers = rings.IntegerModRing(modulus)

    def _repr_(self):
        """
        Return a print representation of this group, which can be
        renamed.

        EXAMPLES::

            sage: G = DirichletGroup(11)
            sage: repr(G)  # indirect doctest
            'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4'
            sage: G.rename('Dir(11)')
            sage: G
            Dir(11)
        """
        s = "Group of Dirichlet characters modulo %s with values in " % self._modulus
        if self._zeta is not None:
            s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta)
        s += str(self.base_ring())
        return s
    def change_ring(self, R, zeta=None, zeta_order=None):
        """
        Return the base extension of ``self`` to ``R``.

        INPUT:

        - ``R`` -- either a ring admitting a conversion map from the
          base ring of ``self``, or a ring homomorphism with the base
          ring of ``self`` as its domain

        - ``zeta`` -- (optional) root of unity in ``R``

        - ``zeta_order`` -- (optional) order of ``zeta``

        EXAMPLES::

            sage: G = DirichletGroup(7,QQ); G
            Group of Dirichlet characters modulo 7 with values in Rational Field
            sage: G.change_ring(CyclotomicField(6))
            Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
        """
        if zeta is None and self._zeta is not None:
            # If a root of unity was chosen explicitly for self, reuse
            # it (and its order) for the new group; otherwise it will
            # be recomputed as the order of R(zeta) by the
            # DirichletGroup factory.
            zeta = self._zeta
            if zeta_order is None:
                # use the same zeta_order (which may still be None)
                zeta_order = self._zeta_order
        if isinstance(R, Map):
            R = R.codomain()
        return DirichletGroup(self.modulus(), R,
                              zeta=zeta, zeta_order=zeta_order)

    def base_extend(self, R):
        """
        Return the base extension of ``self`` to ``R``.

        INPUT:

        - ``R`` -- either a ring admitting a *coercion* map from the
          base ring of ``self``, or a ring homomorphism with the base
          ring of ``self`` as its domain

        EXAMPLES::

            sage: G = DirichletGroup(7,QQ); G
            Group of Dirichlet characters modulo 7 with values in Rational Field
            sage: H = G.base_extend(CyclotomicField(6)); H
            Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2

        Note that the root of unity can change::

            sage: H.zeta()
            zeta6

        This method (in contrast to :meth:`change_ring`) requires a
        coercion map to exist::

            sage: G.base_extend(ZZ)
            Traceback (most recent call last):
            ...
            TypeError: no coercion map from Rational Field to Integer Ring is defined

        Base-extended Dirichlet groups are unique (:trac:`6018`)::

            sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))
            sage: H = DirichletGroup(10, CyclotomicField(4))
            sage: G is H
            True
        """
        if not (isinstance(R, Map) or R.has_coerce_map_from(self.base_ring())):
            raise TypeError("no coercion map from %s to %s is defined"
                            % (self.base_ring(), R))
        return self.change_ring(R)

    @cached_method
    def gens(self):
        """
        Return generators of self.

        EXAMPLES::

            sage: G.<a,b> = DirichletGroup(20)
            sage: G.gens()
            (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1,
             Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
        """
        g = []
        ord = self.zeta_order()
        M = self._module
        zero = M(0)
        orders = self.integers_mod().unit_group().gens_orders()
        for i in range(len(self.unit_gens())):
            z = zero.__copy__()
            z[i] = ord // gcd(ord, orders[i])
            g.append(self.element_class(self, z, check=False))
        return tuple(g)

    def gen(self, n=0):
        """
        Return the n-th generator of self.

        EXAMPLES::

            sage: G = DirichletGroup(20)
            sage: G.gen(0)
            Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
            sage: G.gen(1)
            Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
            sage: G.gen(2)
            Traceback (most recent call last):
            ...
            IndexError: n(=2) must be between 0 and 1
            sage: G.gen(-1)
            Traceback (most recent call last):
            ...
            IndexError: n(=-1) must be between 0 and 1
        """
        n = int(n)
        g = self.gens()
        if n < 0 or n >= len(g):
            raise IndexError("n(=%s) must be between 0 and %s" % (n, len(g)-1))
        return g[n]

    def ngens(self):
        """
        Return the number of generators of self.

        EXAMPLES::

            sage: G = DirichletGroup(20)
            sage: G.ngens()
            2
        """
        return len(self.gens())
See also :meth:`gauss_sum`, which calculates the sum exactly
(which is generally slower).

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: H = DirichletGroup(13, CC)
    sage: e = G.0
    sage: f = H.0
    sage: e.gauss_sum_numerical()
    -3.07497205... + 1.8826966926...*I
    sage: f.gauss_sum_numerical()
    -3.07497205... + 1.8826966926...*I
    sage: abs(f.gauss_sum_numerical())
    3.60555127546...
    sage: sqrt(13.0)
    3.60555127546399

kloosterman_sum_numerical(prec=53, a=1, b=0)

Return the Kloosterman sum associated to this Dirichlet character
as an approximate complex number with ``prec`` bits of precision.
The Kloosterman sum associated to `\chi` and the integers `a`, `b`
is

.. MATH::

    K(a, b, \chi) = \sum_{r \in (\ZZ/m\ZZ)^\times}
    \chi(r)\,\zeta^{ar + br^{-1}},

where `m` is the modulus of `\chi` and `\zeta` is a primitive
`m`-th root of unity.  See also :meth:`kloosterman_sum`, which
calculates the sum exactly (which is generally slower).
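For instance, with the quadratic character ``G.1`` modulo 12
(values reproduced from the doctests in this file)::

    sage: G = DirichletGroup(12)
    sage: e = G.1
    sage: e.kloosterman_sum_numerical(53, 3, 11)
    3.80422606518061 - 3.80422606518061*I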
__eval_at_minus_one()

Efficiently evaluate the character at `-1` using knowledge of its
order.  This is potentially much more efficient than computing the
value at `-1` directly.

We use the following proposition.

Proposition: Suppose `\varepsilon` is a character mod `p^n`, where
`p` is a prime.  Then `\varepsilon(-1) = -1` if and only if `p = 2`
and the factor of `\varepsilon` at 4 is nontrivial, or `p > 2` and
2 does not divide `\phi(p^n)/\mathrm{ord}(\varepsilon)`.

EXAMPLES::

    sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one()
    -1

bernoulli(k, algorithm='recurrence', cache=True, **opts)

Return the generalized Bernoulli number `B_{k,\varepsilon}`.

INPUT:

- ``k`` -- a non-negative integer

- ``algorithm`` -- either ``'recurrence'`` (default) or
  ``'definition'``

- ``cache`` -- if ``True``, cache the result

- ``**opts`` -- optional arguments; not used directly, but passed
  to the :func:`bernoulli` function if this is called

OUTPUT: Let `\varepsilon` be a (not necessarily primitive)
Dirichlet character of modulus `N`.  This function returns the
generalized Bernoulli number `B_{k,\varepsilon}`, defined by the
identity of power series

.. MATH::

    \sum_{a=1}^{N} \frac{\varepsilon(a)\, t\, e^{at}}{e^{Nt} - 1}
    = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!}\, t^k.

.. note::

    For the trivial character and `k = 1`, the value of
    `B_{1,\varepsilon}` is `1/2`, in accordance with the above
    definition, but in contrast to the value `B_1 = -1/2` for the
    classical Bernoulli number.  Some authors use an alternative
    definition giving `B_{1,\varepsilon} = -1/2`; see the
    discussion in [Coh2007]_, Section 9.4.1.

ALGORITHM: if ``algorithm='recurrence'``, we use the formula in
[Coh2007]_, Proposition 9.4.5; this is usually optimal.  The
``'definition'`` algorithm works directly with the defining power
series `g(t) = t/(e^{Nt} - 1)`.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.bernoulli(5)
    7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13
    sage: eps = DirichletGroup(9).0
    sage: eps.bernoulli(3)
    10*zeta6 + 4
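Both algorithms compute the same quantity; a quick consistency
check (the expected output matches the recurrence value above)::

    sage: eps = DirichletGroup(9).0
    sage: eps.bernoulli(3, algorithm="definition")
    10*zeta6 + 4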
decomposition()

Return the decomposition of ``self`` as a product of Dirichlet
characters of prime power modulus, where the prime powers exactly
divide the modulus of this character.

EXAMPLES::

    sage: G.<a,b> = DirichletGroup(20)
    sage: c = a*b
    sage: d = c.decomposition(); d
    [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1,
     Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
    sage: d[0].parent()
    Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2
    sage: d[1].parent()
    Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2

The factors live in different parents, so multiplying a character
by a character in another parent fails in both cases::

    sage: d[0]*d[1] == c
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'
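We can multiply if we're explicit about where we want the
multiplication to take place, for instance by converting both
factors back into ``G`` or by extending them to modulus 20.  A
short sketch continuing the example above (the ``True`` outputs
follow since the two extended factors are again ``a`` and ``b``)::

    sage: G(d[0]) * G(d[1]) == c
    True
    sage: d[0].extend(20) * d[1].extend(20) == c
    True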
conrey_number()

Return the Conrey number for this character.

This is a positive integer coprime to `q` that identifies a
Dirichlet character of modulus `q`.

See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey

EXAMPLES::

    sage: chi4 = DirichletGroup(4).gen()
    sage: chi4.conrey_number()
    3
    sage: chi = DirichletGroup(24)([1,-1,-1]); chi
    Dirichlet character modulo 24 of conductor 24 mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
    sage: chi.conrey_number()
    5
    sage: chi = DirichletGroup(60)([1,-1,I])
    sage: chi.conrey_number()
    17
    sage: chi = DirichletGroup(420)([1,-1,-I,1])
    sage: chi.conrey_number()
    113

TESTS::

    sage: eps1 = DirichletGroup(5)([-1])
    sage: eps2 = DirichletGroup(5,QQ)([-1])
    sage: eps1.conrey_number() == eps2.conrey_number()
    True

change_ring(R)

Return the base extension of ``self`` to ``R``.

INPUT:

- ``R`` -- either a ring admitting a conversion map from the base
  ring of ``self``, or a ring homomorphism with the base ring of
  ``self`` as its domain

EXAMPLES::

    sage: e = DirichletGroup(13).0
    sage: e.change_ring(QQ)
    Traceback (most recent call last):
    ...
    TypeError: Unable to coerce zeta12 to a rational
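Instead of a ring, a ring homomorphism may be given; the session
below follows the :trac:`18072` regression test::

    sage: K.<i> = QuadraticField(-1)
    sage: chi = DirichletGroup(5, K)[1]
    sage: chi(2)
    i
    sage: f = K.complex_embeddings()[0]
    sage: psi = chi.change_ring(f)
    sage: psi(2)
    -1.83697019872103e-16 - 1.00000000000000*I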
zeta()

Return the chosen root of unity in the base ring.

EXAMPLES::

    sage: DirichletGroup(37).zeta()
    zeta36
    sage: DirichletGroup(20).zeta()
    zeta4
    sage: DirichletGroup(60).zeta()
    zeta4
    sage: DirichletGroup(60,QQ).zeta()
    -1

zeta_order()

Return the order of the chosen root of unity in the base ring.

EXAMPLES::

    sage: DirichletGroup(19).zeta_order()
    18
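The related :meth:`unit_gens` method returns the minimal
generators for the units of `(\ZZ/N\ZZ)^*`, where `N` is the
modulus of ``self``::

    sage: DirichletGroup(37).unit_gens()
    (2,)
    sage: DirichletGroup(20).unit_gens()
    (11, 17)
    sage: DirichletGroup(60).unit_gens()
    (31, 41, 37)
    sage: DirichletGroup(20,QQ).unit_gens()
    (11, 17)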
ngens()

Return the number of generators of ``self``.

EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.ngens()
    2

order()

Return the number of elements of ``self``.  This is the same as
``len(self)``.

EXAMPLES::

    sage: len(DirichletGroup(20))
    8
    sage: len(DirichletGroup(20, QQ))
    4

The category of a Dirichlet group is chosen in its constructor:
the group of roots of unity in the base ring is finite, and hence
this group is finite too.  In particular, it is finitely
generated; the added ``FinitelyGenerated()`` here means that the
group has a distinguished set of generators.
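Several invariants of ``G`` can be read off together::

    sage: G = DirichletGroup(20)
    sage: G.gens()
    (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1,
     Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
    sage: G.unit_gens()
    (11, 17)
    sage: G.zeta()
    zeta4
    sage: G.zeta_order()
    4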
minimize_base_ring()

Return a Dirichlet character that equals this one, but over as
small a subfield (or subring) of the base ring as possible.

.. note::

    This function is currently only implemented when the base ring
    is a number field.  It's the identity function in
    characteristic `p`.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.minimize_base_ring().base_ring()
    Cyclotomic Field of order 12 and degree 4
    sage: (e^2).minimize_base_ring().base_ring()
    Cyclotomic Field of order 6 and degree 2

DirichletGroup(N, base_ring=None, zeta=None, zeta_order=None, names=None, integral=False)

Construct a group of Dirichlet characters modulo `N` with values in
a subgroup `V` of the multiplicative group `R^*` of ``base_ring``.

INPUT:

- ``N`` -- positive integer

- ``base_ring`` -- commutative ring; the value ring for the
  characters in this group (default: the cyclotomic field
  `\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)

- ``zeta`` -- (optional) root of unity in ``base_ring``

- ``zeta_order`` -- (optional) positive integer; if ``zeta`` is
  also given, it must be the multiplicative order of ``zeta``;
  this is useful if the base ring is not exact

- ``names`` -- ignored (needed so that
  ``G.<...> = DirichletGroup(...)`` notation works)

- ``integral`` -- boolean (default: ``False``); whether to replace
  the default cyclotomic base ring by its ring of integers as the
  base ring.  This is ignored if ``base_ring`` is not ``None``.

If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
taken to be the cyclic subgroup of order ``zeta_order``; in this
case, `R` must be an integral domain.  Furthermore, a generator
``zeta`` of `V` is computed, and an error is raised if such
``zeta`` cannot be found.

If both ``zeta`` and ``zeta_order`` are omitted, then `V` is taken
to be `R^*`, or equivalently its `n`-torsion subgroup, where `n` is
the exponent of `(\ZZ/N\ZZ)^*`.

EXAMPLES:

The default base ring is a cyclotomic field of order the exponent
of `(\ZZ/N\ZZ)^*`::

    sage: DirichletGroup(20)
    Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2
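If ``zeta`` itself is supplied, the base ring need not be an
integral domain; for example, with values in the cyclic subgroup
of ``Zmod(15)`` generated by 2::

    sage: G = DirichletGroup(5, Zmod(15), zeta=2); G
    Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15
    sage: chi = G([13]); chi
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
    sage: chi^2
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4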
If the base ring is not exact, the order of ``zeta`` may not be
computable; in that case we can specify it using ``zeta_order``::

    sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6))
    Traceback (most recent call last):
    ...
    NotImplementedError: order of element not known
    sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
    Group of Dirichlet characters modulo 7 with values in Complex Field with 53 bits of precision

Note that the root of unity has small order, i.e., it is not the
largest order root of unity in the field.

maximize_base_ring()

Let
MATH:: \\varepsilon : (\\ZZ/N\\ZZ)^* \\to \\QQ(\\zeta_n)", "Field sage: H = G.base_extend(CyclotomicField(6)); H Group of Dirichlet characters", "currently implemented when the base ring is a cyclotomic field", "definition giving `B_{1,\\varepsilon} = -1/2`; see the discussion in [Coh2007]_,", "DirichletGroup(4).gen() sage: chi4.conrey_number() 3 sage: chi = DirichletGroup(24)([1,-1,-1]); chi Dirichlet", "general finite fields - :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version \"\"\"", "v is None: v = self.list() else: if check: v", "return self._modulus def ngens(self): \"\"\" Returns the number of generators", "5 of conductor 5 mapping 2 |--> 13 sage: chi^2", "[[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]], [1, 0, 0;", "= self.parent() z = self.element() o = int(z.additive_order()) Auts =", "the group of Dirichlet character mod 20 with values in", "%s to an element of %s\" % (x, self)) elif", "published by # the Free Software Foundation, either version 2", "https://www.lmfdb.org EXAMPLES:: sage: E = DirichletGroup(4).gen() sage: E.lmfdb_page() # optional", "return Lfunction_from_character(self) raise ValueError('algorithm must be \"pari\" or \"lcalc\"') @cached_method", "sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j])) ....: for i in range(p-1) for", "DirichletGroup(N, base_ring)(1) TrivialCharacter = trivial_character def kronecker_character(d): \"\"\" Return the", "1, Dirichlet character modulo 9 of conductor 9 mapping 2", "=/= 1, the conductor is the smallest p**r such that", "Dirichlet characters modulo 2 with values in Rational Field sage:", "conductor 37733 mapping 1557 |--> -1, 37346 |--> -1 ::", "rings.Integer(d) if d <= 0: raise ValueError(\"d must be positive\")", "modulo 5 of conductor 5 mapping 2 |--> 4 sage:", "DirichletGroup(60).random_element() Dirichlet character modulo 60 of conductor 3 mapping 31", "Dirichlet characters modulo 17 with values in the group of", "of a bug in the cPickle module -- # see", "DirichletGroup(20).unit_gens() (11, 17) sage: DirichletGroup(60).unit_gens() (31, 41, 37) sage: DirichletGroup(20,QQ).unit_gens()", "kronecker_character(1) sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ!", "of `(\\ZZ/N\\ZZ)^*`:: sage: DirichletGroup(20) Group of Dirichlet characters modulo 20", "equals -1) EXAMPLES:: sage: G = DirichletGroup(60) sage: e =", "either version 2 of the License, or # (at your", "divide M(=50) \"\"\" M = int(M) if self.modulus()%M != 0:", "in G.unit_gens()]) def kronecker_character_upside_down(d): \"\"\" Return the quadratic Dirichlet character", "Dirichlet characters. TESTS:: sage: DirichletGroup(12)._module Vector space of dimension 2", "powers of the distinguished root of unity. TESTS:: sage: DirichletGroup(5)._zeta_powers", "1,), (-zeta6 + 1,), zeta6 + 2) Let's check that", "sage: G = DirichletGroup(5, Zmod(15), zeta=2); G Group of Dirichlet", "sage: e = DirichletGroup(16)([-1, 1]) sage: hash(e) == hash((-1,1)) True", "coerce each element of v into self. 
galois_orbits(v=None, reps_only=False, sort=True, check=True)

Return a list of the Galois orbits of Dirichlet characters in
``self``, or in ``v`` if ``v`` is not ``None``.

INPUT:

- ``v`` -- (optional) list of elements of ``self``

- ``reps_only`` -- (optional: default ``False``) if ``True``, only
  return representatives for the orbits

- ``sort`` -- (optional: default ``True``) whether to sort the
  list of orbits and the orbits themselves (slightly faster if
  ``False``)

- ``check`` -- (optional, default: ``True``) whether or not to
  explicitly coerce each element of ``v`` into ``self``

The Galois group is the absolute Galois group of the prime
subfield of ``Frac(R)``.  If `R` is not a domain, an error is
raised::

    sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits()
    Traceback (most recent call last):
    ...
    TypeError: Galois orbits only defined if base ring is an integral domain
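A small example, with the intermediate orbits elided::

    sage: G = DirichletGroup(13)
    sage: G.galois_orbits()
    [
    [Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1],
    ...
    ]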
random_element()

Return a random element of ``self``.

The element is computed by multiplying a random power of each
generator together, where the power is between 0 and the order of
the generator minus 1, inclusive.

EXAMPLES::

    sage: DirichletGroup(37).random_element()
    Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4
    sage: DirichletGroup(20).random_element()
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: DirichletGroup(60).random_element()
    Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1
A character with irrational values cannot be moved into a group
over `\QQ`::

    sage: G.<a,b> = DirichletGroup(20)
    sage: b.change_ring(QQ)
    Traceback (most recent call last):
    ...
    TypeError: Unable to coerce zeta4 to a rational

base_extend(R)

Return the base extension of ``self`` to ``R``.

This method (in contrast to :meth:`change_ring`) requires a
coercion map to exist::

    sage: G = DirichletGroup(7,QQ); G
    Group of Dirichlet characters modulo 7 with values in Rational Field
    sage: H = G.base_extend(CyclotomicField(6)); H
    Group of Dirichlet characters modulo 7 with values in Cyclotomic Field of order 6 and degree 2
    sage: G.base_extend(ZZ)
    Traceback (most recent call last):
    ...
    TypeError: no coercion map from Rational Field to Integer Ring is defined
def lfunction(self, prec=53, algorithm='pari'):
    """
    Return the L-function of ``self``.

    The result is a wrapper around a PARI L-function or around the
    ``lcalc`` command-line program.

    INPUT:

    - ``prec`` -- precision (default 53)

    - ``algorithm`` -- 'pari' (default) or 'lcalc'

    EXAMPLES::

        sage: G.<a,b> = DirichletGroup(20)
        sage: L = a.lfunction(); L
        PARI L-function associated to Dirichlet character modulo 20
        of conductor 4 mapping 11 |--> -1, 17 |--> 1
        sage: L(4)
        0.988944551741105
    """
    if algorithm is None:
        algorithm = 'pari'
    if algorithm == 'pari':
        from sage.lfunctions.pari import lfun_character, LFunction
        Z = LFunction(lfun_character(self), prec=prec)
        Z.rename('PARI L-function associated to %s' % self)
        return Z
    elif algorithm == 'lcalc':
        from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character
        return Lfunction_from_character(self)
    raise ValueError('algorithm must be "pari" or "lcalc"')
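# Usage sketch (hypothetical helper, assumes the default PARI backend):
# evaluate the L-function of a character at a point.  The expected
# value L(4) = 0.988944551741105 comes from the docstring above.
def _demo_lfunction_value():
    from sage.all import DirichletGroup
    a = DirichletGroup(20).gen(0)   # character of conductor 4
    L = a.lfunction()               # PARI-backed L-function wrapper
    return L(4)                     # approximately 0.988944551741105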
MATH:: g_a(\\chi) = \\sum_{r \\in \\ZZ/m\\ZZ} \\chi(r)\\,\\zeta^{ar}, where", "[Dirichlet character modulo 5 of conductor 1 mapping 2 |-->", "base_ring not specified\") e = rings.IntegerModRing(modulus).unit_group_exponent() base_ring = rings.CyclotomicField(e) if", "= n * self.element() else: x = tuple(z**n for z", "= base_ring(zeta) if zeta_order is None: zeta_order = zeta.multiplicative_order() elif", "G.element_class(G, x, check=False) def _repr_short_(self): r\"\"\" A short string representation", "v) where G is `(\\ZZ / N \\ZZ)^*` where `N`", "a canonical coercion:: sage: e = DirichletGroup(5, QQ).0 sage: f", "called OUTPUT: Let `\\varepsilon` be a (not necessarily primitive) character", "from sage.rings.ring import is_Ring from sage.misc.functional import round from sage.misc.cachefunc", "3 |--> -1, Dirichlet character modulo 5 of conductor 5", "DirichletGroup(12, QQbar) sage: e = G.gens()[0] sage: e.kloosterman_sum(5,11) Traceback (most", "5 ] \"\"\" R = self.base_ring() return Sequence([DirichletGroup(p**r,R) for p,", "from sage.categories.groups import Groups category = Groups().Commutative() if base_ring.is_integral_domain() or", "= G([-1]) sage: e.gauss_sum(1) 2*zeta6 - 1 sage: e.gauss_sum(2) -2*zeta6", "UniqueFactory from sage.structure.richcmp import richcmp from sage.arith.all import (binomial, bernoulli,", "% self.modulus() != 0: raise ArithmeticError(\"M(=%s) must be a multiple", "new base ring is not an integral domain:: sage: f", "range(1,N+1)]) * factorial(k) else: raise ValueError(\"algorithm = '%s' unknown\"%algorithm) if", "conversion of the character to Pari. OUTPUT: pair (G, v)", "must divide modulus\") a = [] for u in self.unit_gens():", "look at a non-prime modulus:: sage: N = 9 sage:", "only returns representatives for the orbits. - ``sort`` - (optional:", "`(\\ZZ/N\\ZZ)^*`) - ``zeta`` -- (optional) root of unity in ``base_ring``", "base_ring.is_integral_domain(): raise ValueError(\"base ring (= %s) must be an integral", "A related bug (see :trac:`18086`):: sage: K.<a,b>=NumberField([x^2 + 1, x^2", "of the License, or # (at your option) any later", "sage: G.<a,b> = DirichletGroup(20) sage: a.kernel() [1, 9, 13, 17]", "Dirichlet groups of different moduli, characters of different moduli compare", "1 \"\"\" s = r'\\hbox{Dirichlet character modulo } %s \\hbox{", "mod 20 with values in the rational numbers:: sage: G", "printed correctly (see :trac:`17338`):: sage: latex(DirichletGroup(1)[0]) \\hbox{Dirichlet character modulo }", "is b.values_on_gens() True \"\"\" # This method exists solely because", "= DirichletGroup(5, K) sage: D.change_ring(f) Group of Dirichlet characters modulo", "of this character to a Dirichlet character modulo the multiple", "the orders of the respective generators of `(\\ZZ/N\\ZZ)^*`. OUTPUT: The", "of -1 directly using dlog and a large power of", "conductor 4 mapping 51 |--> -1, 77 |--> 1 sage:", "represent Dirichlet characters. TESTS:: sage: DirichletGroup(12)._module Vector space of dimension", "ring `R`. \"\"\" Element = DirichletCharacter def __init__(self, base_ring, modulus,", "self.values() S = lambda n: sum(v[r] * r**n for r", "zero.__copy__() z[i] = ord//gcd(ord, orders[i]) g.append(self.element_class(self, z, check=False)) return tuple(g)", "and a large power of the image root of unity.", "found. 
class DirichletGroupFactory(UniqueFactory):
    r"""
    Construct a group of Dirichlet characters modulo `N`.

    INPUT:

    - ``N`` -- positive integer

    - ``base_ring`` -- commutative ring; the value ring for the
      characters in this group (default: the cyclotomic field
      `\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)

    - ``zeta`` -- (optional) root of unity in ``base_ring``

    - ``zeta_order`` -- (optional) positive integer; this MUST be the
      order of ``zeta`` if both are specified

    - ``integral`` -- boolean (default: ``False``); whether to replace
      the default cyclotomic field by its ring of integers as the base
      ring.  This is ignored if ``base_ring`` is not ``None``.

    OUTPUT:

    The group of Dirichlet characters modulo `N` with values in a
    subgroup `V` of the multiplicative group `R^*` of ``base_ring``.
    This is the group of homomorphisms `(\ZZ/N\ZZ)^* \to V` with
    pointwise multiplication.  The group `V` is determined as follows:

    - If both ``zeta`` and ``zeta_order`` are omitted, then `V` is
      taken to be `R^*`, or equivalently its `n`-torsion subgroup,
      where `n` is the exponent of `(\ZZ/N\ZZ)^*`.

    - If ``zeta`` is specified, then `V` is taken to be the cyclic
      subgroup generated by ``zeta``; ``zeta_order`` may then also be
      given, in which case it must be the multiplicative order of
      ``zeta``.  This is useful if the base ring is not a domain (in
      which case computing the order of ``zeta`` may fail).

    - If ``zeta`` is not specified but ``zeta_order`` is, then `V` is
      taken to be the group of roots of unity of order dividing
      ``zeta_order`` in `R`.  In this case, `R` must be a domain (so
      `V` is cyclic), and a generator ``zeta`` of `V` is computed; an
      error is raised if such ``zeta`` cannot be found.

    EXAMPLES:

    The default base ring is a cyclotomic field of order the exponent
    of `(\ZZ/N\ZZ)^*`::

        sage: DirichletGroup(20)
        Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2

    We create the group of Dirichlet character mod 20 with values in
    the rational numbers::

        sage: G = DirichletGroup(20, QQ); G
        Group of Dirichlet characters modulo 20 with values in Rational Field
        sage: G.order()
        4
        sage: G.base_ring()
        Rational Field
    """
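# Sketch (hypothetical helper) of the three choices of value group V
# described above: the default cyclotomic field, a rational base ring,
# and an explicitly supplied root of unity.
def _demo_value_groups():
    from sage.all import DirichletGroup, QQ, CyclotomicField
    G1 = DirichletGroup(5)                     # V = torsion of QQ(zeta4)^*
    G2 = DirichletGroup(5, QQ)                 # V = {1, -1}
    K = CyclotomicField(4)
    G3 = DirichletGroup(5, K, zeta=K.gen())    # V = <zeta4>, given explicitly
    return G1.order(), G2.order(), G3.order()  # (4, 2, 4)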
def unit_gens(self):
    r"""
    Returns the minimal generators for the units of `(\ZZ/N\ZZ)^*`,
    where `N` is the modulus.

    EXAMPLES::

        sage: DirichletGroup(37).unit_gens()
        (2,)
        sage: DirichletGroup(20).unit_gens()
        (11, 17)
        sage: DirichletGroup(60).unit_gens()
        (31, 41, 37)
        sage: DirichletGroup(20,QQ).unit_gens()
        (11, 17)
    """
    return self._integers.unit_gens()


@cached_method
def zeta(self):
    """
    Return the chosen root of unity in the base ring.

    EXAMPLES::

        sage: DirichletGroup(20).zeta()
        zeta4
        sage: DirichletGroup(60,QQ).zeta()
        -1
        sage: DirichletGroup(60, GF(25,'a')).zeta()
        2
    """
    zeta = self._zeta
    if zeta is None:
        R = self.base_ring()
        e = self._integers.unit_group_exponent()
        for d in reversed(e.divisors()):
            try:
                zeta = R.zeta(d)
                break
            except ValueError:
                pass
        self.zeta_order.set_cache(d)
    return zeta
def _pari_conversion(self):
    r"""
    Prepare data for the conversion of the character to Pari.

    OUTPUT:

    pair (G, v) where G is `(\ZZ / N \ZZ)^*` where `N` is the modulus

    EXAMPLES::

        sage: chi4 = DirichletGroup(4).gen()
        sage: chi4._pari_conversion()
        ([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])],
          [[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)],
         [1])

        sage: chi = DirichletGroup(24)([1,-1,-1]); chi
        Dirichlet character modulo 24 of conductor 24
        mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
        sage: chi._pari_conversion()
        ([[24, [0]], [8, [2, 2, 2], [7, 13, 17]],
          ...
         [0, 1, 1])
    """
    G = pari.znstar(self.modulus(), 1)
    pari_orders = G[1][1]
    pari_gens = G[1][2]
    # one should use the following, but this does not work
    # pari_orders = G.cyc()
    # pari_gens = G.gen()
    values_on_gens = (self(x) for x in pari_gens)

    # now compute the input for pari (list of exponents)
    P = self.parent()
    if is_ComplexField(P.base_ring()):
        zeta = P.zeta()
        zeta_argument = zeta.argument()
        v = [int(x.argument() / zeta_argument) for x in values_on_gens]
    else:
        dlog = P._zeta_dlog
        v = [dlog[x] for x in values_on_gens]
    m = P.zeta_order()
    v = [(vi * oi) // m for vi, oi in zip(v, pari_orders)]
    return (G, v)
def modulus(self):
    """
    The modulus of this character.

    EXAMPLES::

        sage: e = DirichletGroup(100, QQ).0
        sage: e.modulus()
        100
        sage: e.conductor()
        4
    """
    return self.parent().modulus()


@cached_method
def conductor(self):
    """
    Computes and returns the conductor of this character.

    EXAMPLES::

        sage: G.<a,b> = DirichletGroup(20)
        sage: a.conductor()
        4
        sage: b.conductor()
        5
        sage: (a*b).conductor()
        20
    """
    if self.modulus() == 1 or self.is_trivial():
        return rings.Integer(1)
    F = factor(self.modulus())
    if len(F) > 1:
        return prod([d.conductor() for d in self.decomposition()])
    p = F[0][0]
    # When p is odd, and x =/= 1, the conductor is the smallest
    # p**r such that Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1).
    # For a given r, whether or not the above divisibility holds
    # depends only on the factor of p**(r-1) on the right hand side.
    # Since p-1 is coprime to p, the smallest r such that the
    # divisibility holds equals Valuation(Order(x),p)+1.
    cond = p**(valuation(self.order(), p) + 1)
    if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1:
        cond *= 2
    return rings.Integer(cond)
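# Sketch (hypothetical helper): the conductor always divides the
# modulus, and a character is primitive exactly when the two agree,
# matching the doctests above.
def _demo_conductor_divides_modulus():
    from sage.all import DirichletGroup
    G = DirichletGroup(20)
    a, b = G.gens()
    assert a.conductor() == 4 and b.conductor() == 5
    assert (a*b).conductor() == 20 and (a*b).is_primitive()
    return all(e.modulus() % e.conductor() == 0 for e in G)   # True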
def kernel(self):
    r"""
    Return the kernel of this character.

    OUTPUT: Currently the kernel is returned as a list.  This may
    change.

    EXAMPLES::

        sage: G.<a,b> = DirichletGroup(20)
        sage: a.kernel()
        [1, 9, 13, 17]
        sage: b.kernel()
        [1, 11]
    """
    one = self.base_ring().one()
    return [x for x in range(self.modulus()) if self(x) == one]
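# Sketch (hypothetical helper): the kernel is a subgroup of index equal
# to the order of the character, so len(kernel) * order == euler_phi(N).
def _demo_kernel_index(N=20):
    from sage.all import DirichletGroup, euler_phi
    a = DirichletGroup(N).gen(0)
    return len(a.kernel()) * a.order() == euler_phi(N)   # True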
def is_DirichletGroup(x):
    """
    Returns True if x is a Dirichlet group.

    EXAMPLES::

        sage: from sage.modular.dirichlet import is_DirichletGroup
        sage: is_DirichletGroup(DirichletGroup(11))
        True
        sage: is_DirichletGroup(11)
        False
        sage: is_DirichletGroup(DirichletGroup(11).0)
        False
    """
    return isinstance(x, DirichletGroup_class)
def __init__(self, base_ring, modulus, zeta, zeta_order):
    """
    Create a Dirichlet group.

    Not to be called directly (use the factory function
    ``DirichletGroup``).

    TESTS::

        sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2)  # indirect doctest
        sage: TestSuite(G).run()
    """
    from sage.categories.groups import Groups
    category = Groups().Commutative()
    if base_ring.is_integral_domain() or base_ring.is_finite():
        # The group of n-th roots of unity in the base ring is
        # finite, and hence this Dirichlet group is finite too.
        # In particular, it is finitely generated; the added
        # FinitelyGenerated() here means finitely generated as a
        # monoid.
        category = category.Finite().FinitelyGenerated()
    Parent.__init__(self, base_ring, category=category)
    self._zeta = zeta
    self._zeta_order = zeta_order
    self._modulus = modulus
    self._integers = rings.IntegerModRing(modulus)
def gen(self, n=0):
    """
    Return the n-th generator of self.

    EXAMPLES::

        sage: G = DirichletGroup(20)
        sage: G.gen(0)
        Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
        sage: G.gen(1)
        Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
        sage: G.gen(2)
        Traceback (most recent call last):
        ...
        IndexError: n(=2) must be between 0 and 1
    """
    n = int(n)
    g = self.gens()
    if n < 0 or n >= len(g):
        raise IndexError("n(=%s) must be between 0 and %s" % (n, len(g)-1))
    return g[n]


@cached_method
def gens(self):
    """
    Returns generators of the group.

    EXAMPLES::

        sage: G = DirichletGroup(20)
        sage: G.gens()
        (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1,
         Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
    """
    g = []
    ord = self.zeta_order()
    M = self._module
    zero = M(0)
    orders = self.integers_mod().unit_group().gens_orders()
    for i in range(len(orders)):
        z = zero.__copy__()
        z[i] = ord // gcd(ord, orders[i])
        g.append(self.element_class(self, z, check=False))
    return tuple(g)
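# Sketch (hypothetical helper): the generators really generate the
# whole group; products of their powers exhaust all characters mod 20.
def _demo_gens_generate(N=20):
    from sage.all import DirichletGroup
    G = DirichletGroup(N)
    a, b = G.gens()
    products = {a**i * b**j
                for i in range(a.order()) for j in range(b.order())}
    return len(products) == G.order()   # True: 2 * 4 == 8 for N = 20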
MATH:: g_a(\\chi) =", "[0, 1, 0, -1, 0, 0, 0, -1, 0, 1,", "- ``reps_only`` - (optional: default False) if True only returns", "unity was explicitly given; we use it over the #", "G = DirichletGroup(13, CC) sage: e = G.0 sage: e.is_even()", "51 |--> -1, 77 |--> 1 sage: e.conductor() 4 sage:", "values in Finite Field of size 5 ] \"\"\" R", "is of type DirichletCharacter. EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletCharacter", "H(self) def _pari_conversion(self): r\"\"\" Prepare data for the conversion of", "range(1, zeta_order): a = a * zeta a._set_multiplicative_order(zeta_order/gcd(zeta_order, i)) w.append(a)", "conductor 13 mapping 2 |--> -1 sage: G([K.0]) Dirichlet character", "- zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 + zeta156^18", "\\hbox{Dirichlet character modulo } 2 \\hbox{ of conductor } 1", "sage: DirichletGroup(5, K, zeta=-1, zeta_order=2) Group of Dirichlet characters modulo", "= DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect doctest sage: TestSuite(G).run() sage:", "4 and degree 2, Group of Dirichlet characters modulo 5", "of type DirichletCharacter. EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletCharacter sage:", "number_field from sage.libs.pari import pari from sage.categories.map import Map from", "9 mapping 2 |--> zeta6] sage: (DirichletGroup(36).0).decomposition() [Dirichlet character modulo", "trivial_character(7, Integers(3))(1).parent() Ring of integers modulo 3 \"\"\" return DirichletGroup(N,", "for vi, oi in zip(v, pari_orders)] return (G, v) def", "precision - ``a`` -- integer, as for :meth:`gauss_sum`. The Gauss", "15 sage: chi = G([13]); chi Dirichlet character modulo 5", "return G def gen(self, n=0): \"\"\" Return the n-th generator", "i != 0: s += ', ' s += str(self.parent().unit_gens()[i])", "G = DirichletGroup(30); e = G.1 sage: e.galois_orbit() [Dirichlet character", "sage: e.minimize_base_ring().base_ring() Cyclotomic Field of order 12 and degree 4", "m) g += self(c) * zeta**int(a*e + b*e**(-1)) return g", "modulo `N`. INPUT: - ``N`` -- positive integer - ``base_ring``", "CC) sage: a.is_primitive() False sage: b.is_primitive() False sage: (a*b).is_primitive() True", "inclusive. EXAMPLES:: sage: DirichletGroup(37).random_element() Dirichlet character modulo 37 of conductor", "sage: G = DirichletGroup(20, UniversalCyclotomicField()) sage: e = G([1 for", "= -1` if and only if `p = 2` and", "2 |--> zeta12^2, Dirichlet character modulo 13 of conductor 13", "same object:: sage: DirichletGroup(60) is DirichletGroup(60) True \"\"\" def create_key(self,", "e Dirichlet character modulo 13 of conductor 13 mapping 2", "EXAMPLES:: sage: t = trivial_character(7) sage: [t(x) for x in", "sums, etc. The Kloosterman sum associated to `\\chi` and the", "= self.parent().change_ring(R) return G.element_class(G, [R(x) for x in self.values_on_gens()]) def", "last): ... 
def galois_orbit(self, sort=True):
    r"""
    Return the orbit of this character under the action of the
    absolute Galois group of the prime subfield of the base ring.

    EXAMPLES::

        sage: G = DirichletGroup(30); e = G.1
        sage: e.galois_orbit()
        [Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 17 |--> -zeta4,
         Dirichlet character modulo 30 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4]

    Another example::

        sage: e = DirichletGroup(13).0^2; e
        Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2
        sage: e.galois_orbit()
        [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2,
         Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]

    A non-example::

        sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0
        sage: chi.galois_orbit()
        Traceback (most recent call last):
        ...
        TypeError: Galois orbits only defined if base ring is an integral domain
    """
    if not self.base_ring().is_integral_domain():
        raise TypeError("Galois orbits only defined if base ring "
                        "is an integral domain")
    P = self.parent()
    z = self.element()
    o = int(z.additive_order())
    Auts = set([m % o for m in P._automorphisms()])
    v = [P.element_class(P, m * z, check=False) for m in Auts]
    if sort:
        v.sort()
    return v
@cached_method
def _automorphisms(self):
    """
    Compute the automorphisms of self.  These are always given by
    raising to a power, so the return value is a list of integers.

    At present this is only implemented if the base ring has
    characteristic 0 or a prime.

    EXAMPLES::

        sage: DirichletGroup(7, Integers(9), zeta=Integers(9)(2))._automorphisms()
        Traceback (most recent call last):
        ...
        NotImplementedError: Automorphisms for finite non-field base rings not implemented
    """
    n = self.zeta_order()
    R = self.base_ring()
    p = R.characteristic()
    if p == 0:
        Auts = [e for e in range(1, n) if gcd(e, n) == 1]
    else:
        if not rings.ZZ(p).is_prime():
            raise NotImplementedError("Automorphisms for finite non-field "
                                      "base rings not implemented")
        # The automorphisms in characteristic p are
        # x |--> x^(p^m) for m = 0, ..., r - 1,
        # where p^r = 1 (mod n), so r is the multiplicative
        # order of p modulo n.
        r = rings.IntegerModRing(n)(p).multiplicative_order()
        Auts = [p**m for m in range(0, r)]
    return Auts
def jacobi_sum(self, char, check=True):
    r"""
    Return the Jacobi sum associated to these Dirichlet characters
    (i.e., J(self, char)).

    This is defined as

    .. MATH::

        J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1-a)

    where `\chi` and `\psi` are both characters modulo `N`.

    EXAMPLES::

        sage: D = DirichletGroup(13)
        sage: e = D.0
        sage: f = D[-2]
        sage: e.jacobi_sum(f)
        3*zeta12^2 + 2*zeta12 - 3

    TESTS:

    Characters of different moduli cannot be combined::

        sage: p = 7
        sage: DP = DirichletGroup(p)
        sage: f = DP.0
        sage: e.jacobi_sum(f)
        Traceback (most recent call last):
        ...
        NotImplementedError: Characters must be from the same Dirichlet Group.

        sage: all_jacobi_sums = [(DP[i].values_on_gens(), DP[j].values_on_gens(), DP[i].jacobi_sum(DP[j]))
        ....:                    for i in range(p - 1) for j in range(i, p - 1)]
        sage: for s in all_jacobi_sums:
        ....:     print(s)
        ((1,), (1,), 5)
        ((1,), (zeta6,), -1)
        ((1,), (zeta6 - 1,), -1)
        ((1,), (-1,), -1)
        ((1,), (-zeta6,), -1)
        ((1,), (-zeta6 + 1,), -1)
        ((zeta6,), (zeta6,), -zeta6 + 3)
        ...
        ((-1,), (-1,), 1)
        ((-1,), (-zeta6,), -2*zeta6 + 3)
        ((-1,), (-zeta6 + 1,), 2*zeta6 - 3)
        ...
    """
    if check:
        if self.parent() != char.parent():
            raise NotImplementedError("Characters must be from the "
                                      "same Dirichlet Group.")
    return sum([self(x) * char(1 - x)
                for x in rings.IntegerModRing(self.modulus())])
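# Sketch (hypothetical helper): the classical identity
# J(chi, psi) = g(chi) * g(psi) / g(chi*psi), valid when chi*psi is
# nontrivial, ties jacobi_sum to gauss_sum.
def _demo_jacobi_gauss_identity():
    from sage.all import DirichletGroup
    D = DirichletGroup(13)
    chi, psi = D.0, D.0**2          # chi*psi = D.0^3 is nontrivial
    lhs = chi.jacobi_sum(psi)
    rhs = chi.gauss_sum() * psi.gauss_sum() / (chi*psi).gauss_sum()
    return lhs == rhs               # True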
@cached_method
def is_even(self):
    r"""
    Return ``True`` if and only if `\varepsilon(-1) = 1`.

    EXAMPLES::

        sage: G = DirichletGroup(13)
        sage: e = G.0
        sage: e.is_even()
        False
        sage: e(-1)
        -1
        sage: [e.is_even() for e in G]
        [True, False, True, False, True, ...]

    The same computations work over an inexact base ring::

        sage: G = DirichletGroup(13, CC)
        sage: e = G.0
        sage: e.is_even()
        False
        sage: e(-1)
        -1.000000...
        sage: [e.is_even() for e in G]
        [True, False, True, False, True, ...]

    Note that ``is_even`` need not be the negation of ``is_odd``,
    e.g., in characteristic 2::

        sage: G.<e> = DirichletGroup(13, GF(4,'a'))
        sage: e.is_even()
        True
        sage: e.is_odd()
        True
    """
    R = self.base_ring()
    # self(-1) is either +1 or -1
    if not R.is_exact():
        return abs(self(-1) - R(1)) < 0.5
    return self(-1) == R(1)


@cached_method
def is_odd(self):
    r"""
    Return ``True`` if and only if `\varepsilon(-1) = -1`.

    EXAMPLES::

        sage: G = DirichletGroup(13)
        sage: e = G.0
        sage: e.is_odd()
        True
        sage: [e.is_odd() for e in G]
        [False, True, False, True, False, ...]
    """
    R = self.base_ring()
    # self(-1) is either +1 or -1
    if not R.is_exact():
        return abs(self(-1) - R(-1)) < 0.5
    return self(-1) == R(-1)
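# Sketch (hypothetical helper): for any modulus N > 2 the characters
# split evenly between even and odd, since evaluation at -1 is a
# surjective homomorphism onto {1, -1}.
def _demo_even_odd_split(N=13):
    from sage.all import DirichletGroup
    G = DirichletGroup(N)
    evens = sum(1 for e in G if e.is_even())
    odds = sum(1 for e in G if e.is_odd())
    return evens, odds    # (6, 6) for N = 13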
def _element_constructor_(self, x):
    """
    Construct a Dirichlet character from `x`.

    INPUT:

    - ``x`` -- one of the following:

      - tuple or list of values on the standard generators of
        `(\\ZZ/N\\ZZ)^*`, as returned by
        :meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`

      - vector over `\\ZZ/e\\ZZ`, where `e` is the order of the
        standard root of unity for ``self``

      - a Dirichlet character whose conductor divides the modulus

    OUTPUT:

    The Dirichlet character defined by `x` (type :class:`DirichletCharacter`).

    EXAMPLES::

        sage: G = DirichletGroup(13)
        sage: K = G.base_ring()
        sage: G(1)
        Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
        sage: G([-1])
        Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
        sage: G([K.0])
        Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
        sage: G(0)
        Traceback (most recent call last):
        ...
        TypeError: cannot convert 0 to an element of Group of Dirichlet
        characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4
    """
    R = self.base_ring()
    try:
        if x == R.one():
            x = [R.one()] * len(self.unit_gens())
    except (TypeError, ValueError, ArithmeticError):
        pass
    if isinstance(x, list):  # list of values on each unit generator
        return self.element_class(self, x)
    if isinstance(x, DirichletCharacter):  # conversion from another group
        return self._coerce_in_dirichlet_character(x)
    # anything else (e.g. a vector of exponents) is handled by the
    # element class itself
    return self.element_class(self, x)


def _coerce_in_dirichlet_character(self, x):
    r"""
    Create an element of this group from a Dirichlet character, whose
    conductor must divide the modulus.

    EXAMPLES::

        sage: G = DirichletGroup(6)
        sage: G(DirichletGroup(15).0)
        Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
        sage: G(DirichletGroup(15).1)
        Traceback (most recent call last):
        ...
        TypeError: conductor must divide modulus
    """
    if self.modulus() % x.conductor() != 0:
        raise TypeError("conductor must divide modulus")
    R = self.base_ring()
    a = []
    for u in self.unit_gens():
        v = u.lift()
        # have to do this, since e.g., unit gens mod 11 are not units mod 22
        while x.modulus().gcd(v) != 1:
            v += self.modulus()
        a.append(R(x(v)))
    return self.element_class(self, a)
@cached_method(do_pickle=True)
def values_on_gens(self):
    r"""
    Return a tuple of the values of ``self`` on the standard
    generators of `(\ZZ/N\ZZ)^*`, where `N` is the modulus.

    EXAMPLES::

        sage: e = DirichletGroup(16)([-1, 1])
        sage: e.values_on_gens()
        (-1, 1)

    .. NOTE::

        The constructor of :class:`DirichletCharacter` sets the cache
        of :meth:`element` or of :meth:`values_on_gens`.  The cache of
        one of these methods needs to be set for the other method to
        work properly; this is why these caches have to be stored when
        pickling an instance of :class:`DirichletCharacter`.
    """
    pows = self.parent()._zeta_powers
    return tuple([pows[i] for i in self.element()])


@cached_method(do_pickle=True)
def element(self):
    r"""
    Return the underlying `\ZZ/n\ZZ`-module vector of exponents.

    .. warning::

        Please do not change the entries of the returned vector;
        this vector is mutable *only* because immutable vectors are
        not implemented yet.

    EXAMPLES::

        sage: G.<a,b> = DirichletGroup(20)
        sage: a.element()
        (2, 0)
        sage: b.element()
        (0, 1)
    """
    P = self.parent()
    M = P._module
    if is_ComplexField(P.base_ring()):
        zeta = P.zeta()
        zeta_argument = zeta.argument()
        v = M([int(round(x.argument() / zeta_argument))
               for x in self.values_on_gens()])
    else:
        dlog = P._zeta_dlog
        v = M([dlog[x] for x in self.values_on_gens()])
    return v
def kloosterman_sum(self, a=1, b=0):
    r"""
    Return the "twisted" Kloosterman sum associated to this Dirichlet
    character.

    This includes Gauss sums, classical Kloosterman sums, Salié sums,
    etc.

    The Kloosterman sum associated to `\chi` and the integers `a`, `b`
    is

    .. MATH::

        K(a, b, \chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar + br^{-1}},

    where `m` is the modulus of `\chi` and `\zeta` is a primitive
    `m` th root of unity.  This reduces to the Gauss sum if `b = 0`.

    This method performs an exact calculation and returns an element
    of a suitable cyclotomic field; see also
    :meth:`.kloosterman_sum_numerical`, which gives an inexact answer.

    EXAMPLES::

        sage: G = DirichletGroup(3)
        sage: e = G([-1])
        sage: e.kloosterman_sum(3,5)
        -2*zeta6 + 1

        sage: G = DirichletGroup(20)
        sage: e = G([1 for u in G.unit_gens()])
        sage: e.kloosterman_sum(7,17)
        -2*zeta20^6 + 2*zeta20^4 + 4

    TESTS::

        sage: G = DirichletGroup(20, UniversalCyclotomicField())
        sage: e = G([1 for u in G.unit_gens()])
        sage: e.kloosterman_sum(7,17)
        -2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4

        sage: G = DirichletGroup(12, QQbar)
        sage: e = G.gens()[0]
        sage: e.kloosterman_sum(5,11)
        Traceback (most recent call last):
        ...
        NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented
    """
    G = self.parent()
    zo = G.zeta_order()
    m = G.modulus()
    g = 0
    L = rings.CyclotomicField(m.lcm(zo))
    zeta = L.gen(0)
    try:
        self(1) * zeta**(a+b)
    except TypeError:
        raise NotImplementedError('Kloosterman sums not implemented '
                                  'over this ring')
    n = zeta.multiplicative_order()
    zeta = zeta**(n // m)
    for c in m.coprime_integers(m):
        e = rings.Mod(c, m)
        g += self(c) * zeta**int(a*e + b*e**(-1))
    return g
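# Sketch (hypothetical helper): with b = 0 the Kloosterman sum
# degenerates to the Gauss sum g_a(chi), as noted in the docstring.
def _demo_kloosterman_is_gauss_when_b_zero():
    from sage.all import DirichletGroup
    e = DirichletGroup(3)([-1])
    return e.kloosterman_sum(1, 0) == e.gauss_sum(1)   # True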
The Jacobi sums of all pairs of characters modulo a prime `p` can be
compared against the defining formula; for `p = 7` the test reads::

    sage: p = 7
    sage: DP = DirichletGroup(p)
    sage: all_jacobi_sums = [(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j]))
    ....:                    for i in range(p-1) for j in range(i, p-1)]
    sage: for s in all_jacobi_sums:
    ....:     print(s)
    ((1,), (1,), 5)
    ((1,), (zeta6,), -1)
    ((1,), (zeta6 - 1,), -1)
    ...

Galois orbits and automorphisms are computed through the action of the
absolute Galois group of the prime subfield of the base ring. In
characteristic `p` the automorphisms are the `k`-th power maps for
`k = 1, p, p^2, \ldots, p^{r-1}`, where `r` is the multiplicative order
of `p` modulo `n` (so `p^r \equiv 1 \pmod{n}`) and `n` is the order of
the chosen root of unity. These computations are only implemented over
fields and over rings of prime characteristic::

    sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
    Traceback (most recent call last):
    ...
    NotImplementedError: Automorphisms for finite non-field base rings not implemented
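Since the automorphisms are always given by raising to a power, the
return value of ``_automorphisms`` is a list of integers, and a Galois
orbit can be sketched on exponent vectors alone. The following
pure-Python sketch is illustrative only (the function names are
assumptions, not Sage's API)::

    from math import gcd

    def automorphism_exponents(n, p=0):
        """Exponents k giving the Galois action on n-th roots of unity.

        In characteristic 0 these are all k coprime to n; in
        characteristic p (with gcd(p, n) = 1) they are
        1, p, p^2, ..., p^(r-1) where r is the order of p modulo n.
        """
        if p == 0:
            return [k for k in range(1, n + 1) if gcd(k, n) == 1]
        ks, k = [], 1
        while True:
            ks.append(k)
            k = k * p % n
            if k == 1:
                return ks

    def galois_orbit(element, n, p=0):
        """Orbit of a character stored as an exponent vector modulo n."""
        orbit = {tuple(k * e % n for e in element)
                 for k in automorphism_exponents(n, p)}
        return sorted(orbit)

    # A character of order 12 (exponent vector (1,) w.r.t. zeta_12) has
    # phi(12) = 4 conjugates in characteristic 0:
    print(galois_orbit((1,), 12))          # [(1,), (5,), (7,), (11,)]
    print(automorphism_exponents(12, 5))   # [1, 5]: the order of 5 mod 12 is 2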
For the numerical sum methods the inputs are:

INPUT:

- ``prec`` -- integer (default: 53), *bits* of precision used by
  :meth:`gauss_sum_numerical` and :meth:`kloosterman_sum_numerical`;
  the exact methods :meth:`gauss_sum` and :meth:`kloosterman_sum`
  instead return elements of a cyclotomic field, which is generally
  slower

- ``a`` -- integer, as for :meth:`gauss_sum`

- ``b`` -- integer, as for :meth:`kloosterman_sum`

When a character is constructed from a list of values on the
generators of `(\ZZ/N\ZZ)^*`, the values are validated: there must be
exactly one value per generator, and each value must have
multiplicative order dividing the order of the corresponding
generator::

    sage: k.<i> = CyclotomicField(4)
    sage: G = DirichletGroup(192)
    sage: G([i, -1, -1])
    Traceback (most recent call last):
    ...
    ValueError: values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing (2, 16, 2), respectively
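A sketch of this validation in pure Python, with values represented as
exponents of a primitive `n`-th root of unity (illustrative only; Sage
checks the actual ring elements, and the helper names are assumptions)::

    from math import gcd

    def root_order(exp, n):
        """Multiplicative order of zeta_n**exp for a primitive n-th root zeta_n."""
        return n // gcd(n, exp % n)

    def check_values_on_gens(exponents, gen_orders, n):
        if len(exponents) != len(gen_orders):
            raise ValueError("wrong number of values (= %s) on generators (want %s)"
                             % (len(exponents), len(gen_orders)))
        for e, o in zip(exponents, gen_orders):
            if o % root_order(e, n) != 0:
                raise ValueError("values must have multiplicative orders dividing %s,"
                                 " respectively" % (gen_orders,))

    check_values_on_gens([8, 1, 8], (2, 16, 2), 16)   # -1, zeta16, -1: passes
    check_values_on_gens([4, 8, 8], (2, 16, 2), 16)   # raises: zeta16^4 = i has order 4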
Internally, each group stores its chosen root of unity together with
the table of its powers and their discrete logarithms.

TESTS::

    sage: DirichletGroup(5)._zeta_dlog
    {-1: 2, -zeta4: 3, zeta4: 1, 1: 0}

The dictionary ``_zeta_dlog`` inverts the list ``_zeta_powers``: it
maps each power of the chosen root of unity to its discrete logarithm,
and is built as ``{z: i for i, z in enumerate(self._zeta_powers)}``.
Characters are stored as exponent vectors with respect to this root of
unity; see :meth:`DirichletCharacter.element` below. The chosen root
of unity is exposed by :meth:`zeta` and its order by
:meth:`zeta_order`::

    sage: DirichletGroup(37).zeta()
    zeta36
    sage: DirichletGroup(20).zeta()
    zeta4
    sage: DirichletGroup(60).zeta()
    zeta4
    sage: DirichletGroup(60, GF(25,'a')).zeta()
    2

:meth:`DirichletCharacter.jacobi_sum` returns the Jacobi sum
`J(\text{self}, \chi)` associated to two Dirichlet characters.
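A tiny pure-Python sketch of this power/dlog bookkeeping, using ``2``
as a primitive 4th root of unity in `\GF{5}` (illustrative only)::

    p, zeta, n = 5, 2, 4          # zeta = 2 has multiplicative order 4 in GF(5)
    zeta_powers = [pow(zeta, i, p) for i in range(n)]       # [1, 2, 4, 3]
    zeta_dlog = {z: i for i, z in enumerate(zeta_powers)}   # {1: 0, 2: 1, 4: 2, 3: 3}
    assert all(zeta_dlog[pow(zeta, i, p)] == i for i in range(n))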
This is defined as

.. MATH::

    J(\chi, \psi) = \sum_{a \in \ZZ / N\ZZ} \chi(a) \psi(1 - a),

where `\chi` and `\psi` are both characters modulo `N`. The Gauss sum
associated to `\chi` is

.. MATH::

    g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\, \zeta^{ar},

where `m` is the modulus of `\chi` and `\zeta` is a primitive `m`-th
root of unity, and the "twisted" Kloosterman sum is

.. MATH::

    K(a, b, \chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\, \zeta^{ar + br^{-1}},

which includes Gauss sums (take `b = 0`), classical Kloosterman sums,
Salié sums, etc. These methods perform exact calculations and return
elements of a cyclotomic field; see also :meth:`gauss_sum_numerical`
and :meth:`kloosterman_sum_numerical`, which give inexact answers but
are generally much quicker.

CACHING: Computed Gauss and Kloosterman sums are *not* cached with
this character.

.. SEEALSO::

    - :func:`sage.arith.misc.gauss_sum` for general finite fields
    - :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version

EXAMPLES::

    sage: G = DirichletGroup(3)
    sage: e = G([-1])
    sage: e.gauss_sum(2)
    -2*zeta6 + 1
    sage: norm(e.gauss_sum())
    3
    sage: e.kloosterman_sum(3,5)
    -2*zeta6 + 1

The universal cyclotomic field is supported as a base ring::

    sage: G = DirichletGroup(20, UniversalCyclotomicField())
    sage: e = G([1 for u in G.unit_gens()])
    sage: e.kloosterman_sum(7,17)
    -2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4
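For orientation, here is a pure-Python numerical sketch of all three
sums, straight from the definitions above (illustrative only; it uses
``cmath`` floats rather than exact cyclotomic arithmetic, passes the
character as a plain value table, and needs Python >= 3.8 for
``pow(r, -1, m)``)::

    import cmath
    from math import gcd

    def zeta_pow(m, k):
        """A primitive m-th root of unity raised to the k-th power."""
        return cmath.exp(2j * cmath.pi * k / m)

    def gauss_sum(chi, m, a=1):
        # g_a(chi) = sum over r mod m of chi(r) * zeta_m^(a*r)
        return sum(chi(r) * zeta_pow(m, a * r) for r in range(m))

    def jacobi_sum(chi, psi, m):
        # J(chi, psi) = sum over a mod m of chi(a) * psi(1 - a)
        return sum(chi(a) * psi((1 - a) % m) for a in range(m))

    def kloosterman_sum(chi, m, a=1, b=0):
        # K(a, b, chi) = sum over units r mod m of chi(r) * zeta_m^(a*r + b/r);
        # with b = 0 this reduces to the Gauss sum, since chi kills non-units.
        return sum(chi(r) * zeta_pow(m, a * r + b * pow(r, -1, m))
                   for r in range(1, m) if gcd(r, m) == 1)

    # The quadratic character modulo 3: chi(0) = 0, chi(1) = 1, chi(2) = -1.
    chi3 = lambda x: [0, 1, -1][x % 3]
    print(gauss_sum(chi3, 3, 2))           # about -1.732j, i.e. -2*zeta6 + 1
    print(jacobi_sum(chi3, chi3, 3))       # 1
    print(kloosterman_sum(chi3, 3, 3, 5))  # about -1.732j again, matching the exact value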
:meth:`extend` returns the extension of this character to a Dirichlet
character modulo a multiple `M` of the modulus.

EXAMPLES::

    sage: G.<a,b> = DirichletGroup(20)
    sage: H.<c> = DirichletGroup(4)
    sage: c.extend(20)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: c.extend(20) == a
    True

Conversely, :meth:`restrict` goes down to any multiple of the
conductor::

    sage: e = DirichletGroup(16)([-1, 1])
    sage: e.conductor()
    4
    sage: e.restrict(4)
    Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
    sage: e.restrict(50)
    Traceback (most recent call last):
    ...
    ValueError: conductor(=4) must divide M(=50)

:meth:`primitive_character` returns the character of conductor equal
to the modulus that induces ``self``::

    sage: f = e.primitive_character(); f
    Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
    sage: f.modulus()
    4

Characters form a group: products, inverses and powers act on the
values on generators, and :meth:`bar` gives the complex conjugate
character::

    sage: e = DirichletGroup(5).0
    sage: e
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
    sage: e.bar()
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
    sage: e = DirichletGroup(13).0
    sage: f = ~e
    sage: f*e
    Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
At present the return value of :meth:`element` is a list of integers:
the vector of discrete logarithms, with respect to the chosen root of
unity, of the values of the character on the standard generators of
`(\ZZ/N\ZZ)^*` as returned by
:meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`.

.. WARNING::

    Please do not change the entries of the returned vector; this
    vector is mutable *only* because immutable vectors are not
    implemented yet.

The companion method :meth:`values_on_gens` returns the actual ring
elements, and :meth:`values` the full table of values on `\ZZ/N\ZZ`::

    sage: e = DirichletGroup(16)([-1, 1])
    sage: e.values_on_gens()
    (-1, 1)
    sage: e = DirichletGroup(20)(1)
    sage: e.values()
    [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]

As explained in the note at the top of this section, the constructor
sets the cache of :meth:`element` or of :meth:`values_on_gens`; the
cache of one of these methods is needed for the other to work, which
is why one of them is stored when pickling.
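A pure-Python sketch of how the full value table can be recovered from
the values on generators, walking the unit group by exponent vectors
much as the implementation does (the function name and calling
convention are illustrative assumptions)::

    def character_values(mod, gens, gen_orders, vals_on_gens):
        """Table chi(0), ..., chi(mod - 1), given chi on generators of (Z/mod)^*."""
        result = [0] * mod              # non-units are sent to 0
        exponents = [0] * len(gens)
        n, value = 1 % mod, 1           # current unit and current character value
        while True:
            result[n] = value           # chi(prod gens[i]^exponents[i])
            i = 0
            while True:                 # increment the exponent vector, with carries
                if i >= len(gens):
                    return result       # the whole unit group has been visited
                exponents[i] += 1
                n = n * gens[i] % mod
                value = value * vals_on_gens[i]
                if exponents[i] < gen_orders[i]:
                    break
                exponents[i] = 0        # gens[i]**order == 1, so n and value are
                i += 1                  # already back to their previous state

    # (Z/5)^* is cyclic, generated by 2 (order 4); the quadratic character
    # sends the generator to -1:
    print(character_values(5, [2], [4], [-1]))   # [0, 1, -1, -1, 1]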
The numerical variants return the sum attached to this Dirichlet
character as an approximate complex number with ``prec`` bits of
precision.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: f = e^2
    sage: abs(e.gauss_sum_numerical())
    3.60555127546...
    sage: abs(f.gauss_sum_numerical())
    3.60555127546...
    sage: sqrt(13.0)
    3.60555127546399

TESTS: The field of algebraic numbers is supported (:trac:`19056`)::

    sage: G = DirichletGroup(7, QQbar)
    sage: G[1].gauss_sum_numerical()
    -2.44013335834554 + 1.02261879187179*I

A character `\varepsilon` is even if `\varepsilon(-1) = 1` and odd if
`\varepsilon(-1) = -1`; the value at `-1` is computed from its order,
which is much more efficient than computing the value of `-1` directly
by discrete logarithms. Over an inexact base ring the comparison is
numerical::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.is_odd()
    True
    sage: [e.is_odd() for e in G]
    [False, True, False, True, False, True, False, True, False, True, False, True]

The attached `L`-function is available through :meth:`lfunction`,
computed by PARI by default or by the ``lcalc`` program::

    sage: G.<a,b> = DirichletGroup(20)
    sage: L = a.lfunction(); L
    PARI L-function associated to Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: L(4)
    0.988944551741105

With the algorithm "lcalc"::

    sage: a = a.primitive_character()
    sage: L = a.lfunction(algorithm='lcalc'); L
    L-function with complex Dirichlet coefficients

The generalized Bernoulli numbers `B_{k,\varepsilon}` are defined by

.. MATH::

    \sum_{a=1}^{N} \frac{\varepsilon(a)\, t\, e^{at}}{e^{Nt} - 1}
    = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!}\, t^k.

.. WARNING::

    In the case of the trivial Dirichlet character modulo 1, this
    definition gives `B_{1,\varepsilon} = 1/2`, in contrast to the
    value `B_1 = -1/2` for the classical Bernoulli number. Some
    authors use an alternative definition giving
    `B_{1,\varepsilon} = -1/2`; see the discussion in [Coh2007]_,
    Section 9.4.1.

The ``'recurrence'`` algorithm computes them via classical Bernoulli
numbers, the ``'definition'`` algorithm via the power series above::

    sage: DirichletGroup(1)[0].bernoulli(1)
    1/2
TypeError: Galois orbits only defined if base", "sage: e.is_odd() True \"\"\" R = self.base_ring() # self(-1) is", "the ``lcalc`` program. INPUT: - ``prec`` -- precision (default 53)", "val_on_gen.base_ring().zero() while True: # record character value on n result_list[n]", "%s generated by %s in \" % (self._zeta_order, self._zeta) s", "character of the given modulus, with values in the given", "%s with values in \" % self.modulus() if self._zeta is", "as for :meth:`.kloosterman_sum` - ``b`` -- integer, as for :meth:`.kloosterman_sum`.", "sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one() -1 \"\"\" D = self.decomposition()", "+ 1,), 1) ((zeta6 - 1,), (zeta6 - 1,), -3*zeta6", "return self K = rings.CyclotomicField(m) return self.change_ring(K) def minimize_base_ring(self): r\"\"\"", "the caching would be broken:: sage: k = k[1:]; k", "code is pretty fast, at least compared to # the", "e = G.0 sage: e Dirichlet character modulo 13 of", "mapping 2 |--> zeta12^3 - zeta12, Dirichlet character modulo 13", "!= char.parent(): raise NotImplementedError(\"Characters must be from the same Dirichlet", "(31, 41, 37) sage: DirichletGroup(20,QQ).unit_gens() (11, 17) \"\"\" return self._integers.unit_gens()", "group. EXAMPLES:: sage: DirichletGroup(5).list() [Dirichlet character modulo 5 of conductor", "of ``zeta`` cannot be determined automatically, we can specify it", "self.__bernoulli: return self.__bernoulli[k] N = self.modulus() K = self.base_ring() if", "- <NAME> (2005-09-02): Fixed bug in comparison of Dirichlet characters.", "sums are *not* cached with this character. EXAMPLES:: sage: G", "positive \"\"\" modulus = rings.Integer(N) if modulus <= 0: raise", "<NAME> (2008-02-16): speed up __call__ method for Dirichlet characters, miscellaneous", "ring is not a domain (in which case the group", "base_ring(zeta) if zeta_order is None: zeta_order = zeta.multiplicative_order() elif zeta_order", "G = DirichletGroup(20) sage: G.gen(0) Dirichlet character modulo 20 of", "exponents[i] < orders[i]: break exponents[i] = 0 i += 1", "== 0: raise ValueError(\"d must be nonzero\") D = fundamental_discriminant(d)", "elif self.modulus() % 4 == 2: # 0 factors at", "2], [0, 0, 0]], [1, 0, 0; 0, 1, 0;", "ValueError: conductor(=4) must divide M(=50) \"\"\" M = int(M) if", "has been called Ring of integers modulo 9 sage: DirichletGroup(13)", "8, 3], [[1, matrix(0,2)], [1, matrix(0,2)], [2, Mat([2, 1])]], [1,", "Field sage: G.change_ring(CyclotomicField(6)) Group of Dirichlet characters modulo 7 with", "modulo 5 of conductor 5 mapping 2 |--> 13 sage:", "= P.zeta() zeta_argument = zeta.argument() v = [int(x.argument() / zeta_argument)", "ring has characteristic 0 or a prime. EXAMPLES:: sage: DirichletGroup(17)._automorphisms()", "zeta4] sage: d[0].parent() Group of Dirichlet characters modulo 4 with", "2*zeta156^30 + zeta156^28 - zeta156^24 - zeta156^22 + zeta156^21 +", "Traceback (most recent call last): ... 
TypeError: cannot convert 0", "ber = sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k) else:", "= DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one() -1 \"\"\" D = self.decomposition() val =", "DirichletGroup(21).gen(0) ; e.values() [0, 1, -1, 0, 1, -1, 0,", "CyclotomicField(4)) sage: G is H True sage: G3 = DirichletGroup(31,", "k = DirichletGroup.create_key(2, base_ring=QQ); k (Rational Field, 2, None, None)", "chi.multiplicative_order() 4 Other operations only work if ``zeta`` is specified::", "of conductor 4 mapping 51 |--> -1, 77 |--> 1", "+ zeta52^12 - zeta52^11 - zeta52^10 - zeta52^7 - zeta52^5", "error will be raised. EXAMPLES:: sage: DirichletGroup(20).galois_orbits() [ [Dirichlet character", "__pow__(self, n): \"\"\" Return self raised to the power of", "a Dirichlet group. TESTS:: sage: DirichletGroup.create_key(60) (Cyclotomic Field of order", "MATH:: g_a(\\chi) = \\sum_{r \\in \\ZZ/m\\ZZ} \\chi(r)\\,\\zeta^{ar}, where `m` is", "3`:: sage: (DirichletGroup(18).0).decomposition() [Dirichlet character modulo 2 of conductor 1,", "modulo 24 of conductor 24 mapping 7 |--> 1, 13", "... TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet", "R(-1) @cached_method def is_primitive(self): \"\"\" Return ``True`` if and only", "domain EXAMPLES:: sage: e = DirichletGroup(7, QQ).0 sage: f =", "category=category) self._zeta = zeta self._zeta_order = zeta_order self._modulus = modulus", "g**n return e def unit_gens(self): r\"\"\" Returns the minimal generators", "def decomposition(self): r\"\"\" Return the decomposition of self as a", "len(DirichletGroup(20, QQ)) 4 sage: len(DirichletGroup(20, GF(5))) 8 sage: len(DirichletGroup(20, GF(2)))", "sage: G = DirichletGroup(20) sage: G.gen(0) Dirichlet character modulo 20", "create the group of Dirichlet character mod 20 with values", "and `V` must have order ``zeta_order``. Furthermore, a generator ``zeta``", "14266017175 |--> 1 AUTHORS: - <NAME> (2006-08-06) \"\"\" d =", "DirichletGroup(13) sage: G Group of Dirichlet characters modulo 13 with", "z in orbit: seen_so_far.add(tuple(z.element())) G = Sequence(G, cr=True) if sort:", "zeta_order): \"\"\" Create a Dirichlet group. Not to be called", "the order of ``zeta`` is very large. - If ``zeta``", "= [0] + [g * ((n*t).exp(prec)) for n in range(1,N+1)]", "-a^2] We can also restrict the order of the characters,", "= DirichletGroup(16, QQ); H(DirichletGroup(16).1) Traceback (most recent call last): ...", "elements of this Dirichlet group. This is the same as", "1, 36] sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values() [0,", "value ring for the characters in this group (default: the", "5 of conductor 5 mapping 2 |--> 2,) TESTS: Dirichlet", "for j in range(i, p-1)] sage: for s in all_jacobi_sums:", "self._set_element_constructor() if '_zeta_order' in state: state['_zeta_order'] = rings.Integer(state['_zeta_order']) super(DirichletGroup_class, self).__setstate__(state)", "characters modulo 2 with values in Complex Field with 53", "G = DirichletGroup(3) sage: e = G([-1]) sage: e.kloosterman_sum(3,5) -2*zeta6", "License, or # (at your option) any later version. #", "x^2 - 3 with a = 1.732050807568878? 
        """
        R = self.base_ring()
        if isinstance(x, list):  # list of values on each unit generator
            return self.element_class(self, x)
        elif not isinstance(x, DirichletCharacter):
            raise TypeError("cannot convert %s to an element of %s" % (x, self))
        elif not x.conductor().divides(self.modulus()):
            raise TypeError("conductor must divide modulus")
        a = []
        for u in self.unit_gens():
            v = u.lift()
            # have to do this, since e.g., 11 and 17 are not units mod 22
            while x.modulus().gcd(v) != 1:
                v += self.modulus()
            a.append(R(x(v)))
        return self.element_class(self, a)

    def minimize_base_ring(self):
        r"""
        Return a Dirichlet character that equals this one, but over as
        small a subfield (or subring) of the base ring as possible.

        .. NOTE::

            This function is currently only implemented when the base
            ring is a number field.

        EXAMPLES::

            sage: G = DirichletGroup(13)
            sage: e = DirichletGroup(13).0
            sage: e.base_ring()
            Cyclotomic Field of order 12 and degree 4
            sage: e.minimize_base_ring().base_ring()
            Cyclotomic Field of order 12 and degree 4
            sage: (e^2).minimize_base_ring().base_ring()
            Cyclotomic Field of order 6 and degree 2
            sage: (e^3).minimize_base_ring().base_ring()
            Cyclotomic Field of order 4 and degree 2
            sage: (e^12).minimize_base_ring().base_ring()
            Rational Field

        TESTS:

        Check that :trac:`18479` is fixed::

            sage: f = Newforms(Gamma1(25), names='a')[1]
            sage: eps = f.character()
            sage: eps.minimize_base_ring() == eps
            True

        A related bug (see :trac:`18086`)::

            sage: K.<a,b> = NumberField([x^2 + 1, x^2 - 3])
            sage: chi = DirichletGroup(7, K).0
            sage: chi.minimize_base_ring()
            Dirichlet character modulo 7 of conductor 7 mapping 3 |--> ...
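        .. NOTE::

            A companion sketch (added): a character over ``QQ`` can be
            pushed back into the full cyclotomic base ring with
            :meth:`maximize_base_ring`::

                sage: b = DirichletGroup(20, QQ).1
                sage: b.maximize_base_ring().base_ring()
                Cyclotomic Field of order 4 and degree 2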
        """
        R = self.base_ring()
        if R.is_prime_field():
            return self
        p = R.characteristic()
        if p:
            K = rings.IntegerModRing(p)
        elif self.order() <= 2:
            K = rings.QQ
        elif (isinstance(R, number_field.NumberField_generic)
              and euler_phi(self.order()) < R.absolute_degree()):
            K = rings.CyclotomicField(self.order())
        else:
            return self
        try:
            return self.change_ring(K)
        except (TypeError, ValueError, ArithmeticError):
            return self

    def lmfdb_page(self):
        r"""
        Open the LMFDB web page of the character in a browser.

        See https://www.lmfdb.org

        EXAMPLES::

            sage: E = DirichletGroup(4000).1
            sage: E.lmfdb_page()  # optional -- webbrowser
        """
        import webbrowser
        # URL layout as used by the LMFDB Dirichlet character pages
        lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'
        url = lmfdb_url.format(self.modulus(), self.conrey_number())
        webbrowser.open(url)

    def kloosterman_sum(self, a=1, b=0):
        r"""
        Return the "twisted" Kloosterman sum associated to this Dirichlet
        character.

        This includes Gauss sums, classical Kloosterman sums, Salié sums,
        etc.

        The Kloosterman sum associated to `\chi` and the integers `a`, `b` is

        .. MATH::

            K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}},

        where `m` is the modulus of `\chi` and `\zeta` is a primitive
        `m` th root of unity. This reduces to the Gauss sum if `b=0`.
        This method performs an exact calculation and returns an element of
        a suitable cyclotomic field; see also
        :meth:`.kloosterman_sum_numerical`, which gives an inexact answer
        (but is generally much quicker).

        CACHING: Computed Kloosterman sums are *not* cached with this
        character.

        EXAMPLES::

            sage: G = DirichletGroup(3)
            sage: e = G([-1])
            sage: e.kloosterman_sum(3,5)
            -2*zeta6 + 1
            sage: G = DirichletGroup(20)
            sage: e = G([1 for u in G.unit_gens()])
            sage: e.kloosterman_sum(7,17)
            -2*zeta20^6 + 2*zeta20^4 + ...

        TESTS::

            sage: G = DirichletGroup(12, QQbar)
            sage: e = G.gens()[0]
            sage: e.kloosterman_sum(5,11)
            Traceback (most recent call last):
            ...
            NotImplementedError: Kloosterman sums only currently implemented when the base ring is a cyclotomic field or QQ.
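        .. NOTE::

            A direct check of the defining sum (added sketch; `\zeta` is
            taken in the cyclotomic field where the answer lives)::

                sage: e = DirichletGroup(3)([-1])
                sage: z = CyclotomicField(6).zeta(3)
                sage: sum(e(r) * z^(3*r + 5*r.inverse_mod(3)) for r in [1, 2])
                -2*zeta6 + 1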
        """
        G = self.parent()
        K = G.base_ring()
        if not (number_field.is_CyclotomicField(K) or is_RationalField(K)):
            raise NotImplementedError("Kloosterman sums only currently "
                                      "implemented when the base ring is a "
                                      "cyclotomic field or QQ.")
        m = G.modulus()
        L = rings.CyclotomicField(m.lcm(K.zeta_order()))
        zeta = L.gen(0)
        g = L.zero()
        for c in m.coprime_integers(m):
            e = rings.Mod(c, m)
            g += L(self(c)) * zeta**int(a*e + b*e**(-1))
        return g

    def multiplicative_order(self):
        """
        Return the order of this character.

        EXAMPLES::

            sage: e = DirichletGroup(100).1
            sage: e.order()    # same as multiplicative_order
            20
            sage: e.multiplicative_order()
            20
            sage: e = DirichletGroup(100).0
            sage: e.order()
            2
            sage: e.multiplicative_order()
            2
        """
        if self.parent().zeta.is_in_cache():
            return self.element().additive_order()
        return lcm([z.multiplicative_order() for z in self.values_on_gens()])
    def __len__(self):
        """
        Return the number of elements of this Dirichlet group.

        This is the same as self.order().

        EXAMPLES::

            sage: len(DirichletGroup(20))
            8
            sage: len(DirichletGroup(20, QQ))
            4
            sage: len(DirichletGroup(20, GF(5)))
            8
            sage: len(DirichletGroup(20, GF(2)))
            1
            sage: len(DirichletGroup(20, GF(3)))
            4
        """
        return self.order()

    def gen(self, n=0):
        """
        Return the n-th generator of self.

        EXAMPLES::

            sage: G = DirichletGroup(20)
            sage: G.gen(0)
            Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
            sage: G.gen(1)
            Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4
            sage: G.gen(2)
            Traceback (most recent call last):
            ...
            IndexError: n(=2) must be between 0 and 1

        ::

            sage: G.gen(-1)
            Traceback (most recent call last):
            ...
            IndexError: n(=-1) must be between 0 and 1
        """
        n = int(n)
        g = self.gens()
        if n < 0 or n >= len(g):
            raise IndexError("n(=%s) must be between 0 and %s" % (n, len(g) - 1))
        return g[n]

    def gauss_sum(self, a=1):
        r"""
        Return a Gauss sum associated to this Dirichlet character.

        The Gauss sum associated to `\chi` is

        .. MATH::

            g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},

        where `m` is the modulus of `\chi` and `\zeta` is a primitive
        `m` th root of unity.
        FACTS: If the modulus is a prime `p` and the character is
        nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.

        CACHING: Computed Gauss sums are *not* cached with this character.

        EXAMPLES::

            sage: G = DirichletGroup(3)
            sage: e = G([-1])
            sage: e.gauss_sum(1)
            2*zeta6 - 1
            sage: e.gauss_sum(2)
            -2*zeta6 + 1
            sage: abs(e.gauss_sum_numerical())
            1.7320508075...
            sage: sqrt(3.0)
            1.73205080756888

        .. SEEALSO::

            - :func:`sage.arith.misc.gauss_sum` for general finite fields
            - :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version
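        .. NOTE::

            A direct check of the defining sum (added sketch)::

                sage: e = DirichletGroup(3)([-1])
                sage: z = CyclotomicField(6).zeta(3)
                sage: sum(e(r) * z^r for r in range(1, 3)) == e.gauss_sum(1)
                True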
        """
        G = self.parent()
        K = G.base_ring()
        chi = self
        m = G.modulus()
        if is_ComplexField(K):
            return self.gauss_sum_numerical(a=a)
        elif is_AlgebraicField(K):
            L = K
            zeta = L.zeta(m)
        elif number_field.is_CyclotomicField(K) or is_RationalField(K):
            chi = chi.minimize_base_ring()
            n = lcm(m, G.zeta_order())
            L = rings.CyclotomicField(n)
            zeta = L.gen(0) ** (n // m)
        else:
            raise NotImplementedError("Gauss sums only currently implemented "
                                      "when the base ring is a cyclotomic "
                                      "field, QQ, QQbar, or a complex field")
        zeta = zeta ** a
        g = L(chi(0))
        z = L.one()
        for c in range(1, m):
            z *= zeta
            g += L(chi(c)) * z
        return g

    def zeta(self):
        """
        Return the chosen root of unity in the base ring.
        EXAMPLES::

            sage: DirichletGroup(37).zeta()
            zeta36
            sage: DirichletGroup(20).zeta()
            zeta4
            sage: DirichletGroup(60).zeta()
            zeta4
            sage: DirichletGroup(60, QQ).zeta()
            -1
        """
        zeta = self._zeta
        if zeta is None:
            R = self.base_ring()
            e = self._integers.unit_group_exponent()
            for d in reversed(e.divisors()):
                try:
                    zeta = R.zeta(d)
                    break
                except ValueError:
                    pass
            self.zeta_order.set_cache(d)
        return zeta

    @cached_method
    def zeta_order(self):
        """
        Return the order of the chosen root of unity in the base ring.

        EXAMPLES::

            sage: DirichletGroup(20).zeta_order()
            4
            sage: DirichletGroup(60).zeta_order()
            4
            sage: DirichletGroup(60, GF(25,'a')).zeta_order()
            4
            sage: DirichletGroup(19).zeta_order()
            18
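        .. NOTE::

            Added consistency sketch: this is just the multiplicative
            order of :meth:`zeta`::

                sage: G = DirichletGroup(37)
                sage: G.zeta_order() == G.zeta().multiplicative_order()
                True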
        """
        order = self._zeta_order
        if order is None:
            order = self.zeta().multiplicative_order()
        return order

    def exponent(self):
        """
        Return the exponent of this group.

        EXAMPLES::

            sage: DirichletGroup(20).exponent()
            4
            sage: DirichletGroup(20,GF(3)).exponent()
            2
            sage: DirichletGroup(20,GF(2)).exponent()
            1
            sage: DirichletGroup(37).exponent()
            36
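        .. NOTE::

            Added sketch: with the default choice of root of unity, the
            exponent coincides with :meth:`zeta_order`::

                sage: G = DirichletGroup(20)
                sage: G.exponent() == G.zeta_order()
                True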
        """
        return self.zeta_order()

    def lfunction(self, prec=53, algorithm='pari'):
        r"""
        Return the L-function of ``self``.

        The result is a wrapper around a PARI L-function or around
        the ``lcalc`` program.

        INPUT:

        - ``prec`` -- precision (default 53)

        - ``algorithm`` -- 'pari' (default) or 'lcalc'
        EXAMPLES:

        With the algorithm "lcalc"::

            sage: a = DirichletGroup(20)(1)
            sage: a = a.primitive_character()
            sage: L = a.lfunction(algorithm='lcalc'); L
            L-function with complex Dirichlet coefficients
            sage: L.value(4)  # abs tol 1e-14
            0.988944551741105 - 5.16608739123418e-18*I
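        .. NOTE::

            Added sketch, assuming the default ``algorithm='pari'``; the
            returned object is a PARI-backed wrapper::

                sage: a = DirichletGroup(20)(1)
                sage: a.lfunction()
                PARI L-function associated to Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1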
        """
        if algorithm is None:
            algorithm = 'pari'
        if algorithm == 'pari':
            from sage.lfunctions.pari import lfun_character, LFunction
            Z = LFunction(lfun_character(self), prec=prec)
            Z.rename('PARI L-function associated to %s' % self)
            return Z
        elif algorithm == 'lcalc':
            from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character
            return Lfunction_from_character(self)
        raise ValueError('algorithm must be "pari" or "lcalc"')

    def _mul_(self, other):
        """
        Return the product of self and other.

        EXAMPLES::

            sage: G.<a,b> = DirichletGroup(20)
            sage: a*b # indirect doctest
            Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4

        Multiplying elements whose parents have different zeta orders works::

            sage: a = DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1)
            sage: b = DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1])
            sage: a * b # indirect doctest
            Dirichlet character modulo 3 of conductor 3 mapping 2 |--> -1
        """
        G = self.parent()
        if self.element.is_in_cache() and other.element.is_in_cache():
            x = self.element() + other.element()
        else:
            x = tuple(y * z for y, z in zip(self.values_on_gens(),
                                            other.values_on_gens()))
        return G.element_class(G, x, check=False)
class DirichletGroupFactory(UniqueFactory):
    r"""
    Construct a group of Dirichlet characters modulo `N`.

    INPUT:

    - ``N`` -- positive integer

    - ``base_ring`` -- commutative ring; the value ring for the
      characters in this group (default: the cyclotomic field
      `\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`)

    - ``zeta`` -- (optional) root of unity in ``base_ring``

    - ``zeta_order`` -- (optional) positive integer; this must be the
      order of ``zeta`` if both are specified

    - ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)``
      notation works)

    - ``integral`` -- boolean (default: ``False``); whether to replace
      the default cyclotomic field by its ring of integers (ignored if
      ``base_ring`` is given)

    OUTPUT:

    The group of Dirichlet characters modulo `N` with values in a
    subgroup `V` of the multiplicative group `R^*` of the base ring.
    The group `V` is determined as follows:

    - If both ``zeta`` and ``zeta_order`` are specified, then `V` is the
      cyclic group generated by ``zeta``, and ``zeta`` must have order
      ``zeta_order``.

    - If ``zeta`` is specified but not ``zeta_order``, then ``zeta_order``
      is taken to be the multiplicative order of ``zeta``.

    - If ``zeta`` is not specified but ``zeta_order`` is, then a generator
      ``zeta`` of order ``zeta_order`` is searched for in the base ring,
      and an error is raised if no such ``zeta`` can be found.

    - If neither ``zeta`` nor ``zeta_order`` is specified, then `V` is
      taken to be `R^*` itself, or equivalently its `n`-torsion subgroup,
      where `n` is the exponent of `(\ZZ/N\ZZ)^*`.
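    .. NOTE::

        A minimal usage sketch (added; outputs assume the default
        cyclotomic base ring, respectively ``QQ``)::

            sage: G = DirichletGroup(20)
            sage: G.base_ring()
            Cyclotomic Field of order 4 and degree 2
            sage: len(G)
            8
            sage: len(DirichletGroup(20, QQ))
            4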
\"\"\"", "the :func:`bernoulli` function if this is called OUTPUT: Let `\\varepsilon`", "M = P._module if is_ComplexField(P.base_ring()): zeta = P.zeta() zeta_argument =", "then `V` is taken to be `R^*`, or equivalently its", "def bernoulli(self, k, algorithm='recurrence', cache=True, **opts): r\"\"\" Returns the generalized", "character associated to self. EXAMPLES:: sage: e = DirichletGroup(100).0; e", "mapping 5 |--> -1 sage: G(DirichletGroup(15).0) Dirichlet character modulo 6", "EXAMPLES:: sage: G = DirichletGroup(13) sage: K = G.base_ring() sage:", "# Order(x) divides EulerPhi(p**r) = p**(r-1)*(p-1). # For a given", "list of orbits and the orbits themselves (slightly faster if", "rings.Integer(1) F = factor(self.modulus()) if len(F) > 1: return prod([d.conductor()", "sage: b.maximize_base_ring().base_ring() Cyclotomic Field of order 4 and degree 2", "sage: G = DirichletGroup(20) sage: G.modulus() 20 \"\"\" return self._modulus", "hash(e) == hash((-1,1)) True \"\"\" return hash(self.values_on_gens()) def __invert__(self): \"\"\"", "implemented yet. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.element() (2,", "sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits() Traceback (most recent call last): ...", "|--> -1] ] sage: e = G.0 sage: e Dirichlet", "will be recomputed as the order # of R(zeta) by", "divide `\\phi(p^n)/\\mbox{\\rm ord}(\\varepsilon)`. EXAMPLES:: sage: chi = DirichletGroup(20).0; chi._DirichletCharacter__eval_at_minus_one() -1", "2 \"\"\" g = rings.IntegerModRing(self.modulus()).unit_group_exponent() if g == 1: g", "past # we need to set the cache of element()", "the root of unity has small order, i.e., it is", "4 mapping 3 |--> -1 sage: e.restrict(50) Traceback (most recent", "str(self.base_ring()) return s @cached_method def decomposition(self): r\"\"\" Returns the Dirichlet", "0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0, -1]", "of conductor 1 mapping 2 |--> 1], ..., [Dirichlet character", "of conductor 5 mapping 11 |--> 1, 7 |--> -zeta4,", "return self.change_ring(K) except (TypeError, ValueError, ArithmeticError): return self def modulus(self):", "and zeta_order must be None if base_ring not specified\") e", "sage: G.<a,b> = DirichletGroup(20) sage: repr(a) # indirect doctest 'Dirichlet", "of n-th roots of unity in the base ring is", "is only called if the object was not found in", "the group of order 8 generated by a in Number", "sage: G = DirichletGroup(13, CC) sage: e = G.0 sage:", "DirichletCharacter def __init__(self, base_ring, modulus, zeta, zeta_order): \"\"\" Create a", "This function is currently only implemented when the base ring", "val_on_gen[i] n *= gens[i] if exponents[i] < orders[i]: break exponents[i]", "[DI1995]_, Section 2.2): .. MATH:: \\sum_{a=1}^N \\frac{\\varepsilon(a) t e^{at}}{e^{Nt}-1} =", "group modulo `2^k` is trivial for `k = 1` and", "sage: K30 = CyclotomicField(30) sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30) Dirichlet character", "sage: g(3) 14 sage: g.parent().zeta() 14 \"\"\" if not (isinstance(R,", "round from sage.misc.cachefunc import cached_method from sage.misc.fast_methods import WithEqualityById from", "|--> zeta4, Dirichlet character modulo 5 of conductor 5 mapping", "which can be renamed. 
        EXAMPLES::

            sage: G = DirichletGroup(11)
            sage: repr(G) # indirect doctest
            'Group of Dirichlet characters modulo 11 with values in Cyclotomic Field of order 10 and degree 4'
            sage: G.rename('Dir(11)')
            sage: G
            Dir(11)
        """
        s = "Group of Dirichlet characters modulo %s with values in " % self.modulus()
        if self._zeta is not None:
            s += "the group of order %s generated by %s in " % (self._zeta_order, self._zeta)
        s += str(self.base_ring())
        return s

    def bar(self):
        """
        Return the complex conjugate of this Dirichlet character.
        EXAMPLES::

            sage: e = DirichletGroup(5).0
            sage: e
            Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4
            sage: e.bar()
            Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -zeta4
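        .. NOTE::

            Added sketch: since the values are roots of unity,
            conjugation agrees with inversion::

                sage: e = DirichletGroup(5).0
                sage: e.bar() == ~e
                True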
        """
        return ~self

    def gens(self):
        """
        Returns generators of self.

        EXAMPLES::

            sage: G = DirichletGroup(20)
            sage: G.gens()
            (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)

        TESTS:

        If the base ring is not an integral domain, some operations
        still work, such as creation of elements; other operations only
        work if ``zeta`` is specified::

            sage: G = DirichletGroup(5, Zmod(15)); G
            Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15
            sage: chi = G([13]); chi
            Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
            sage: chi.multiplicative_order()
            4
            sage: G.gens()
            Traceback (most recent call last):
            ...
If the base ring is not a domain (in which case the group of roots of
unity is not necessarily cyclic), some operations still work, such as
creation of elements::

    sage: G = DirichletGroup(5, Integers(15)); G
    Group of Dirichlet characters modulo 5 with values in Ring of integers modulo 15
    sage: chi = G([13]); chi
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 13
    sage: chi^2
    Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 4
    sage: chi.multiplicative_order()
    4

Other operations only work if ``zeta`` is specified::

    sage: G.gens()
    Traceback (most recent call last):
    ...
    NotImplementedError: factorization of polynomials over rings with composite characteristic is not implemented
    sage: G = DirichletGroup(5, Integers(15), zeta=Integers(15)(2)); G
    Group of Dirichlet characters modulo 5 with values in the group of order 4 generated by 2 in Ring of integers modulo 15
    sage: G.gens()
    (Dirichlet character modulo 5 of conductor 5 mapping 2 |--> 2,)

Similarly, Galois orbits and automorphisms are only defined over
suitable base rings::

    sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits()
    Traceback (most recent call last):
    ...
    TypeError: Galois orbits only defined if base ring is an integral domain
    sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
    Traceback (most recent call last):
    ...
    NotImplementedError: Automorphisms for finite non-field base rings not implemented
``ngens()``

Return the number of generators of self.

EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.ngens()
    2

``gens()``

Return generators of self.

EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.gens()
    (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
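As a sketch of what "generators" means here (assuming a Sage session;
the brute-force enumeration below is only illustrative, not how the
library enumerates characters): every character in the group is a
product of powers of the generators::

    sage: G = DirichletGroup(20)
    sage: a, b = G.gens()
    sage: products = [a^i * b^j for i in range(a.order()) for j in range(b.order())]
    sage: set(products) == set(G.list())
    True
    sage: len(products) == G.order() == 8
    True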
``jacobi_sum(char, check=True)``

Return the Jacobi sum associated to these Dirichlet characters
(i.e., `J(\chi, \psi)`), defined by

.. MATH::

    J(\chi, \psi) = \sum_{x \in \ZZ/N\ZZ} \chi(x)\,\psi(1-x),

where both characters must be of the same parent; in particular, both
characters are modulo `N`.

EXAMPLES::

    sage: D = DirichletGroup(13)
    sage: e = D.0
    sage: f = D[-2]
    sage: e.jacobi_sum(f)
    3*zeta12^2 + 2*zeta12 - 3
    sage: f.jacobi_sum(e)
    3*zeta12^2 + 2*zeta12 - 3
    sage: D = DirichletGroup(5)
    sage: g = D(1)
    sage: g.jacobi_sum(g)
    3

We can also do this over a finite field::

    sage: g = DirichletGroup(17, GF(9,'a')).0
    sage: g.jacobi_sum(g**2)
    2*a

Characters from different groups are rejected::

    sage: e = DirichletGroup(13).0
    sage: DP = DirichletGroup(7)
    sage: f = DP.0
    sage: e.jacobi_sum(f)
    Traceback (most recent call last):
    ...
    NotImplementedError: Characters must be from the same Dirichlet Group.

TESTS: :trac:`6393` is fixed; comparison used to check only that the
values were the same, without checking that the characters had the same
level.

``bernoulli(k, algorithm='recurrence', cache=True, **opts)``

Let `\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)` be a Dirichlet
character. This function returns the generalized Bernoulli number
`B_{k,\varepsilon}`, as defined by the identity of power series given
below. The ``recurrence`` algorithm uses the recurrence from [Coh2007]_,
Proposition 9.4.5; this is usually optimal. The ``definition`` algorithm
uses the definition directly.
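The Jacobi sum can be checked against its defining sum directly; this
reuses the modulus-5 characters from the doctests above::

    sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
    sage: Y.jacobi_sum(Z)
    -1
    sage: # -1 is the correct value of the Jacobi sum J(Y, Z):
    sage: sum([Y(x) * Z(1-x) for x in IntegerModRing(5)])
    -1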
The defining identity of power series is

.. MATH::

    \sum_{a=1}^{N} \varepsilon(a) \frac{t e^{at}}{e^{Nt}-1}
    = \sum_{k=0}^{\infty} B_{k,\varepsilon} \frac{t^k}{k!}.

.. WARNING::

    In the case of the trivial Dirichlet character of modulus 1, this
    function returns `B_{1,\varepsilon} = 1/2`, in accordance with the
    above definition, but in contrast to the value `B_1 = -1/2` for the
    classical Bernoulli number. Some authors use an alternative
    definition giving `B_{1,\varepsilon} = -1/2`; see the discussion in
    [Coh2007]_, Section 9.4.1.

ALGORITHM NOTES (from the implementation): the recurrence code is pretty
fast, at least compared to the definition-based algorithm, but it could
likely be sped up by a factor of 10 or more in many cases, especially
since it ends up computing all the Bernoulli numbers up to `k`, which
should be done with power series instead of repeated calls to
``bernoulli``.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.bernoulli(5)
    7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 + 9110/13
    sage: eps = DirichletGroup(9).0
    sage: eps.bernoulli(3)
    10*zeta6 + 4
    sage: eps.bernoulli(3, algorithm="definition")
    10*zeta6 + 4
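Both algorithms must agree; a quick consistency check, reusing the
example above::

    sage: eps = DirichletGroup(9).0
    sage: eps.bernoulli(3) == eps.bernoulli(3, algorithm="definition")
    True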
``gauss_sum_numerical(prec=53, a=1)``

Return a Gauss sum associated to this Dirichlet character as an
approximate complex number with ``prec`` bits of precision.

INPUT:

- ``prec`` -- integer (default: 53), *bits* of precision

- ``a`` -- integer, as for :meth:`gauss_sum`

EXAMPLES::

    sage: G = DirichletGroup(3)
    sage: e = G.0
    sage: abs(e.gauss_sum_numerical())
    1.7320508075...
    sage: sqrt(3.0)
    1.73205080756888
    sage: e.gauss_sum_numerical(a=2)
    -...e-15 - 1.7320508075...*I
    sage: e.gauss_sum_numerical(a=2, prec=100)
    4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I
    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: abs(e.gauss_sum_numerical())
    3.60555127546...
    sage: sqrt(13.0)
    3.60555127546399
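For a nontrivial character modulo a prime `p`, the absolute value of the
Gauss sum is `\sqrt{p}`; a sketch of that check (the tolerance below is
an arbitrary choice)::

    sage: e = DirichletGroup(13).0
    sage: abs(abs(e.gauss_sum_numerical()) - sqrt(13.0)) < 1e-10
    True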
``unit_gens()``

Return the minimal generators for the units of `(\ZZ/N\ZZ)^*`, where
`N` is the modulus of self.

EXAMPLES::

    sage: DirichletGroup(37).unit_gens()
    (2,)
    sage: DirichletGroup(20).unit_gens()
    (11, 17)
    sage: DirichletGroup(60).unit_gens()
    (31, 41, 37)
    sage: DirichletGroup(20,QQ).unit_gens()
    (11, 17)

``zeta()``

Return the chosen root of unity in the base ring.

EXAMPLES::

    sage: DirichletGroup(37).zeta()
    zeta36
    sage: DirichletGroup(20).zeta()
    zeta4
    sage: DirichletGroup(60).zeta()
    zeta4
    sage: DirichletGroup(60, GF(25,'a')).zeta()
    2

``zeta_order()``

Return the order of the chosen root of unity in the base ring.

EXAMPLES::

    sage: DirichletGroup(20).zeta_order()
    4
    sage: DirichletGroup(60).zeta_order()
    4
    sage: DirichletGroup(60, GF(25,'a')).zeta_order()
    4
    sage: DirichletGroup(19).zeta_order()
    18

``_element_constructor_(x)``

Construct a Dirichlet character from `x`.

INPUT:

- ``x`` -- one of the following:

  - a :class:`DirichletCharacter` whose conductor divides the modulus

  - a tuple or list of ring elements: the values of the character on
    the standard generators of `(\ZZ/N\ZZ)^*`

  - a vector over `\ZZ/e\ZZ`, where `e` is the order of the standard
    root of unity

OUTPUT:

The Dirichlet character defined by `x` (type :class:`DirichletCharacter`).

EXAMPLES::

    sage: G.<e> = DirichletGroup(13)
    sage: K = G.base_ring()
    sage: G(1)
    Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
    sage: G([-1])
    Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1
    sage: G([K.0])
    Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
    sage: G(0)
    Traceback (most recent call last):
    ...
    TypeError: cannot convert 0 to an element of Group of Dirichlet characters modulo 13 with values in Cyclotomic Field of order 12 and degree 4

Characters of other moduli convert in when the conductor divides the
modulus; otherwise ``TypeError: conductor must divide modulus`` is
raised. Values that do not fit the base ring are also rejected::

    sage: G = DirichletGroup(6)
    sage: G(DirichletGroup(3).0)
    Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
    sage: G(DirichletGroup(15).0)
    Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
    sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1)
    Traceback (most recent call last):
    ...
    TypeError: Unable to coerce zeta4 to a rational
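A minimal sketch of building a character from its values on the unit
generators (the values must have multiplicative orders dividing the
orders of the respective generators, otherwise a ``ValueError`` is
raised, as in the ``(2, 16, 2)`` example cited above)::

    sage: e = DirichletGroup(16)([-1, 1])
    sage: e.values_on_gens()
    (-1, 1)
    sage: e(15), e(5)   # the unit generators of Integers(16)
    (-1, 1)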
``gauss_sum(a=1)``

Return a Gauss sum associated to this Dirichlet character. The Gauss
sum associated to `\chi` is

.. MATH::

    g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\,\zeta^{ar},

where `m` is the modulus of `\chi` and `\zeta` is a primitive `m`-th
root of unity.

FACTS: If the modulus is a prime `p` and the character is nontrivial,
then the Gauss sum has absolute value `\sqrt{p}`.

CACHING: Computed Gauss sums are *not* cached with this character.

This is currently only implemented when the base ring is a cyclotomic
field, the rational field, the complex field, or the field of algebraic
numbers; otherwise ``NotImplementedError`` is raised.

EXAMPLES::

    sage: G = DirichletGroup(3)
    sage: e = G([-1])
    sage: e.gauss_sum(1)
    2*zeta6 - 1
    sage: e.gauss_sum(2)
    -2*zeta6 + 1
    sage: norm(e.gauss_sum())
    3

::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.gauss_sum()
    -zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34 - zeta156^33 - zeta156^31 + ... + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1
    sage: factor(norm(e.gauss_sum()))
    13^24

The Gauss sum of the trivial character is 1::

    sage: G = DirichletGroup(1)
    sage: chi = G.one()
    sage: chi.gauss_sum()
    1

.. SEEALSO::

    - :func:`sage.arith.misc.gauss_sum` for general finite fields

    - :meth:`gauss_sum_numerical` for a floating-point evaluation
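The definition can be verified by summing directly in a cyclotomic field
that contains both the values and the `m`-th roots of unity; a sketch
for the quadratic character modulo 3 (here `2\zeta_6 - 1 = \sqrt{-3}`,
and ``CyclotomicField(6)`` is chosen because it contains a cube root of
unity)::

    sage: e = DirichletGroup(3).0
    sage: e.gauss_sum()
    2*zeta6 - 1
    sage: K = CyclotomicField(6); zeta3 = K.zeta(3)
    sage: sum(K(e(r)) * zeta3^r for r in [1, 2])
    2*zeta6 - 1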
``modulus()``

Return the modulus of self.

EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.modulus()
    20

``level()``

Synonym for :meth:`modulus`.

EXAMPLES::

    sage: e = DirichletGroup(100, QQ).0
    sage: e.level()
    100

``conductor()``

Compute and return the conductor of this character.

EXAMPLES::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.conductor()
    4
    sage: (a*b).conductor()
    20
    sage: type(G(1).conductor())
    <type 'sage.rings.integer.Integer'>

``multiplicative_order()``

The order of this character.

EXAMPLES::

    sage: e = DirichletGroup(100).0
    sage: e.multiplicative_order()
    2

``restrict(M)``

Return the restriction of this character to a Dirichlet character
modulo the divisor `M` of the modulus, which must also be a multiple of
the conductor of this character.

EXAMPLES::

    sage: e = DirichletGroup(100).0
    sage: e.modulus()
    100
    sage: e.conductor()
    4
    sage: e.restrict(20)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: e.restrict(4)
    Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
    sage: e.restrict(50)
    Traceback (most recent call last):
    ...
    ValueError: conductor(=4) must divide M(=50)

``primitive_character()``

Return the primitive character associated to self.

EXAMPLES::

    sage: e = DirichletGroup(100).0; e
    Dirichlet character modulo 100 of conductor 4 mapping 51 |--> -1, 77 |--> 1
    sage: f = e.primitive_character(); f
    Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
    sage: f.modulus()
    4
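A sketch relating conductor, restriction, and primitivity (the last
check assumes, plausibly, that extending the associated primitive
character back to the original modulus recovers the character, as in the
``c.extend(20) == a`` doctest elsewhere in this file)::

    sage: e = DirichletGroup(100).0
    sage: f = e.primitive_character()
    sage: f.is_primitive()
    True
    sage: f.modulus() == e.conductor() == 4
    True
    sage: f.extend(100) == e
    True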
``_repr_()``

String representation of self.

EXAMPLES::

    sage: G.<a,b> = DirichletGroup(20)
    sage: repr(a)    # indirect doctest
    'Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1'

TESTS:

Dirichlet characters modulo 1 and 2 are printed correctly::

    sage: DirichletGroup(1)[0]
    Dirichlet character modulo 1 of conductor 1
    sage: DirichletGroup(2)[0]
    Dirichlet character modulo 2 of conductor 1

``_latex_()``

LaTeX representation of self.

EXAMPLES::

    sage: G.<a,b> = DirichletGroup(16)
    sage: latex(b)  # indirect doctest
    \hbox{Dirichlet character modulo } 16 \hbox{ of conductor } 16 \hbox{ mapping } 15 \mapsto 1,\ 5 \mapsto \zeta_{4}

TESTS:

Dirichlet characters modulo 1 and 2 are printed correctly::

    sage: latex(DirichletGroup(1)[0])
    \hbox{Dirichlet character modulo } 1 \hbox{ of conductor } 1
    sage: latex(DirichletGroup(2)[0])
    \hbox{Dirichlet character modulo } 2 \hbox{ of conductor } 1
``__call__(m)``

Return the value of this character at the integer `m`.

.. warning::

    A table of values of the character is made the first time you call
    this (unless `m` equals -1).

EXAMPLES::

    sage: G = DirichletGroup(60)
    sage: e = prod(G.gens(), G(1))
    sage: e
    Dirichlet character modulo 60 of conductor 60 mapping 31 |--> -1, 41 |--> -1, 37 |--> zeta4
    sage: e(-1)
    -1
    sage: e(2)
    0
    sage: e(7)
    -zeta4
    sage: Integers(60).unit_gens()
    (31, 41, 37)
    sage: e(31)
    -1
    sage: e(41)
    -1
    sage: e(37)
    zeta4
    sage: e(31*37)
    -zeta4
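Values depend only on the residue class of `m`, and non-units map to
zero; a minimal check with the quadratic character modulo 4::

    sage: e = DirichletGroup(4).gen()
    sage: e(3)
    -1
    sage: e(2)            # not coprime to the modulus
    0
    sage: e(7) == e(3)    # 7 is congruent to 3 mod 4
    True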
``values()``

Return a list of the values of this character on each integer between 0
and the modulus.

EXAMPLES::

    sage: e = DirichletGroup(20)(1)
    sage: e.values()
    [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
    sage: e = DirichletGroup(20).gen(0)
    sage: e.values()
    [0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
    sage: e = DirichletGroup(21).gen(0) ; e.values()
    [0, 1, -1, 0, 1, -1, 0, 0, -1, 0, 1, -1, 0, 1, 0, 0, 1, -1, 0, 1, -1]
    sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values()
    [0, 1, 36, 0, 1, 36, 0, 0, 36, 0, 1, 36, 0, 1, 0, 0, 1, 36, 0, 1, 36]
    sage: e = DirichletGroup(21, base_ring=GF(3)).gen(0) ; e.values()
    [0, 1, 2, 0, 1, 2, 0, 0, 2, 0, 1, 2, 0, 1, 0, 0, 1, 2, 0, 1, 2]
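The table returned by ``values()`` is consistent with calling the
character directly; a quick sketch, assuming a Sage session::

    sage: e = DirichletGroup(20).gen(0)
    sage: all(e.values()[m] == e(m) for m in range(20))
    True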
``order()``

Return the number of elements of this group. This is the same as
``len(self)``.

EXAMPLES::

    sage: DirichletGroup(20).order()
    8
    sage: DirichletGroup(37).order()
    36

``__len__()``

Return the number of elements of this group. This is the same as
``self.order()``.

EXAMPLES::

    sage: len(DirichletGroup(20))
    8
    sage: len(DirichletGroup(20, QQ))
    4
    sage: len(DirichletGroup(20, GF(2)))
    1
    sage: len(DirichletGroup(20, GF(3)))
    4

``exponent()``

Return the exponent of this group.

EXAMPLES::

    sage: DirichletGroup(20).exponent()
    4
    sage: DirichletGroup(20,GF(3)).exponent()
    2
    sage: DirichletGroup(20,GF(2)).exponent()
    1
    sage: DirichletGroup(37).exponent()
    36

``random_element()``

Return a random element of self. The element is computed by multiplying
a random power of each generator together, where the power is between 0
and the order of the generator minus 1, inclusive.

EXAMPLES::

    sage: DirichletGroup(37).random_element()
    Dirichlet character modulo 37 of conductor 37 mapping 2 |--> zeta36^4
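The group is finite and iterable, so the count can be cross-checked by
brute force; a sketch::

    sage: G = DirichletGroup(20)
    sage: sum(1 for chi in G) == G.order() == 8
    True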
Then `\\varepsilon(-1) =", "= None state_dict = state[1] if values_on_gens_key in state_dict: values_on_gens", "-zeta4: 3, zeta4: 1, 1: 0} \"\"\" return {z: i", "sage: e = DirichletGroup(16)([-1, 1]) sage: loads(dumps(e)) == e True", "= x[0]*x[1]^2; e Dirichlet character modulo 35 of conductor 35", "modulo 7 with values in Number Field in a with", "exponent vector by 1, # increase n accordingly, and increase", "def base_extend(self, R): \"\"\" Return the base extension of ``self``", "from `x`. EXAMPLES:: sage: G = DirichletGroup(13) sage: K =", "or m != N - 1: return self.values()[m] else: return", "character modulo 5 of conductor 5 mapping 2 |--> -1,", "-1, 17 |--> 1' TESTS: Dirichlet characters modulo 1 and", "to coerce zeta12 to a rational We test the case", "is the correct value of the Jacobi sum J(Y, Z).", "ring is an integral domain \"\"\" if not self.base_ring().is_integral_domain(): raise", "return self(-1) == R(1) @cached_method def is_odd(self): r\"\"\" Return ``True``", "sage: from sage.modular.dirichlet import is_DirichletGroup sage: is_DirichletGroup(DirichletGroup(11)) True sage: is_DirichletGroup(11)", "generators of `(\\ZZ/n\\ZZ)^*`. INPUT: - ``parent`` -- :class:`DirichletGroup`, a group", "degree 2 \"\"\" base_ring, modulus, zeta, zeta_order = key return", "G] [True, False, True, False, True, False, True, False, True,", "= zeta self._zeta_order = zeta_order self._modulus = modulus self._integers =", "more efficiently. v = self.values() S = lambda n: sum(v[r]", "sage: G.unit_gens() (11, 17) sage: G.zeta() zeta4 sage: G.zeta_order() 4", "return rings.Integer(cond) @cached_method def decomposition(self): r\"\"\" Return the decomposition of", "(= Ring of integers modulo 15) must be an integral", "it also ensures that ``zeta`` is an element of ``base_ring``", "state_dict: values_on_gens = state_dict[values_on_gens_key] del state_dict[values_on_gens_key] # element() used an", "\"\"\" Returns the base ring of this Dirichlet character. EXAMPLES::", "conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo 13", "K.<a> = NumberField(x^4 + 1) sage: DirichletGroup(5, K) Group of", "- ``algorithm`` -- either ``'recurrence'`` (default) or ``'definition'`` - ``cache``", "eps1.conrey_number() == eps2.conrey_number() True \"\"\" G, v = self._pari_conversion() return", "e = rings.Mod(c, m) g += self(c) * zeta**int(a*e +", "specified values on generators of `(\\ZZ/n\\ZZ)^*`. INPUT: - ``parent`` --", "13 |--> -1, 17 |--> -1 sage: chi.conrey_number() 5 sage:", "loads(G.dumps()) == G True We compute a Dirichlet group over", "group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in", "unity with smaller order than expected (:trac:`6018`):: sage: G =", "5 mapping 2 |--> a^2, Dirichlet character modulo 5 of", "modulus should be positive \"\"\" modulus = rings.Integer(N) if modulus", "1 :: sage: G.<e> = DirichletGroup(13) sage: loads(G.dumps()) == G", "1 \"\"\" e = self(1) for i in range(self.ngens()): g", "e.kloosterman_sum_numerical(53,3,11) 3.80422606518061 - 3.80422606518061*I \"\"\" G = self.parent() K =", "integers modulo 15 sage: G.gens() (Dirichlet character modulo 5 of", "is also given, it must be the multiplicative order of", "sage: DirichletGroup(17)._automorphisms() [1, 3, 5, 7, 9, 11, 13, 15]", "since group is multiplicative 20 sage: e.multiplicative_order() 20 sage: e", "to be stored when pickling an instance of :class:`DirichletCharacter`. 
\"\"\"", "DirichletGroup(6) sage: G(DirichletGroup(3).0) Dirichlet character modulo 6 of conductor 3", "= G.0 The real component of the numerical value of", "Return the chosen root of unity in the base ring.", "same level! - <NAME> (2006-01-07): added more examples - <NAME>", "of conductor 13 mapping 2 |--> -1] ] sage: e", "in a number field:: sage: R.<x> = PolynomialRing(QQ) sage: K.<a>", "mapping 11 |--> 1, 17 |--> zeta4 sage: a*b #", "sage: kronecker_character_upside_down(97*389*997^2) Dirichlet character modulo 37506941597 of conductor 37733 mapping", "n EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a^2 Dirichlet character", "x in v] G = [] seen_so_far = set([]) for", "base ring is an integral domain sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2)).galois_orbits()", "the factory function ``DirichletGroup``). The ``DirichletGroup`` factory ensures that either", "for z in self.values_on_gens()) return G.element_class(G, x, check=False) def _repr_short_(self):", "construct the group of Dirichlet character mod 20, but with", "field:: sage: p = next_prime(10^40) sage: g = DirichletGroup(19, GF(p));", "= (self(x) for x in pari_gens) # now compute the", "r\"\"\" Return the underlying `\\ZZ/n\\ZZ`-module vector of exponents. .. warning::", "the hash of ``self``. EXAMPLES:: sage: e = DirichletGroup(16)([-1, 1])", "group is finite too. # In particular, it is finitely", "13 mapping 2 |--> -zeta12^2 + 1] A non-example:: sage:", "sum has absolute value `\\sqrt{p}`. CACHING: Computed Gauss sums are", "2, 0, 0, 2, 0, 1, 2, 0, 1, 0,", "have to do this, since e.g., unit gens mod 11", "if M % self.modulus() != 0: raise ArithmeticError(\"M(=%s) must be", "be the negation of is_odd, e.g., in characteristic 2:: sage:", "divisor M of the modulus, which must also be a", "Cyclotomic Field of order 12 and degree 4 sage: e", "for the group, are only implemented if `V` is cyclic", "== 'pari': from sage.lfunctions.pari import lfun_character, LFunction Z = LFunction(lfun_character(self),", "= G([-1]) sage: e.kloosterman_sum(3,5) -2*zeta6 + 1 sage: G =", "as free_module import sage.modules.free_module_element as free_module_element import sage.rings.all as rings", "and degree 2 Note that the root of unity can", "def lmfdb_page(self): r\"\"\" Open the LMFDB web page of the", "-1, 0, 1, 0, 0, 0, 1, 0, -1] sage:", "lambda n: sum(v[r] * r**n for r in range(1, N))", "Auts] if sort: v.sort() return v def gauss_sum(self, a=1): r\"\"\"", "must be a multiple of the modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M,", "= G.base_extend(CyclotomicField(6)); H Group of Dirichlet characters modulo 7 with", "of ``self``. The result is a wrapper around a PARI", "5 \\mapsto \\zeta_{4} TESTS: Dirichlet characters modulo 1 and 2", "integer, as for :meth:`.kloosterman_sum`. EXAMPLES:: sage: G = DirichletGroup(3) sage:", "GF(2))) 1 sage: len(DirichletGroup(20, GF(3))) 4 \"\"\" return self.order() def", "reversed(e.divisors()): try: zeta = R.zeta(d) break except ValueError: pass self.zeta_order.set_cache(d)", "* bernoulli(j, **opts) * N**(j-1) * S(k-j) for j in", "not found in the cache. TESTS:: sage: K = CyclotomicField(4)", "\"\"\" if M % self.modulus() != 0: raise ArithmeticError(\"M(=%s) must", "sums not implemented ' 'over this ring') n = zeta.multiplicative_order()", "at 2. vals = [1] + vals return [D[i](vals[i]) for", "None state_dict = state[1] if values_on_gens_key in state_dict: values_on_gens =", "Dirichlet character modulo 31 of conductor 31 mapping 3 |-->", "can be found. 
- If ``zeta`` is specified, then `V`", "1, 2, 0, 1, 2] :: sage: chi = DirichletGroup(100151,", "is the least common multiple of `n` and the exponent", "finite non-field base rings not implemented sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()", "sage: L.value(4) # abs tol 1e-14 0.988944551741105 - 5.16608739123418e-18*I \"\"\"", "chi.gauss_sum() 1 .. SEEALSO:: - :func:`sage.arith.misc.gauss_sum` for general finite fields", "G.append(orbit) for z in orbit: seen_so_far.add(tuple(z.element())) G = Sequence(G, cr=True)", "self._integers __iter__ = multiplicative_iterator def list(self): \"\"\" Return a list", "zeta12^2 sage: e.galois_orbit() [Dirichlet character modulo 13 of conductor 13", "extension of this character to a Dirichlet character modulo the", "of order 4 and degree 2' and 'Group of Dirichlet", "= ComplexField(prec) phi = CC.coerce_map_from(K) elif number_field.is_CyclotomicField(K) or is_RationalField(K): phi", "representatives for the orbits. - ``sort`` - (optional: default True)", "a poly ring over a number field. prec = k+2", "exponents[i] += 1 except IndexError: # Done! return result_list value", "its domain - ``zeta`` -- (optional) root of unity in", "mapping 3 |--> -1, Dirichlet character modulo 5 of conductor", "def _automorphisms(self): \"\"\" Compute the automorphisms of self. These are", "rings not implemented sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms() Traceback (most recent", "in characteristic p are # k-th powering for # k", "1 sage: latex(DirichletGroup(2)[0]) \\hbox{Dirichlet character modulo } 2 \\hbox{ of", "ord//gcd(ord, orders[i]) g.append(self.element_class(self, z, check=False)) return tuple(g) def integers_mod(self): r\"\"\"", "[t(x) for x in [0..20]] [0, 1, 1, 1, 1,", "sage: a.values_on_gens() is b.values_on_gens() True \"\"\" # This method exists", "Restore a pickled element from ``state``. TESTS:: sage: e =", "``base_ring`` was not be a part of the key, the", "4 and degree 2 \"\"\" N = self.modulus() m =", "2*zeta12 - 3 sage: p = 7 sage: DP =", "== 0: # 2 factors at 2. vals[0].append(vals[1][0]) del vals[1]", "- zeta52^7 - zeta52^5 + zeta52^4 Check that :trac:`25127` is", "a=1, b=0): r\"\"\" Return the \"twisted\" Kloosterman sum associated to", "Field of size 10000000000000000000000000000000000000121 Note that the root of unity", "R(1) @cached_method def is_odd(self): r\"\"\" Return ``True`` if and only", "} 1 sage: latex(DirichletGroup(2)[0]) \\hbox{Dirichlet character modulo } 2 \\hbox{", "elif number_field.is_CyclotomicField(K) or is_RationalField(K): phi = K.complex_embedding(prec) CC = phi.codomain()", "-- :class:`DirichletGroup`, a group of Dirichlet characters - ``x`` --", "prec=53, algorithm='pari'): \"\"\" Return the L-function of ``self``. The result", "1 except IndexError: # Done! return result_list value += val_on_gen[i]", "from %s to %s is defined\" % (self.base_ring(), R)) return", "specified; use the same zeta_order # (which may still be", "`n` is the exponent of `(\\ZZ/N\\ZZ)^*`. Many operations, such as", "character. EXAMPLES:: sage: G.<a> = DirichletGroup(11) sage: b = copy(a)", "group. This is the same as self.order(). EXAMPLES:: sage: len(DirichletGroup(20))", "z.multiplicative_order() m = lcm(g,n) if n == m: return self", "or that both are ``None``. 
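An explicit root of unity pins down the value group `V`; the
complex-field example from the factory documentation (``exp(2*pi*I/6)``
is a primitive 6th root of unity)::

    sage: G = DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
    sage: G.zeta_order()
    6
    sage: G.zeta()
    0.500000000000000 + 0.866025403784439*I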
``create_key(N, base_ring=None, zeta=None, zeta_order=None, names=None, integral=False)``

Create a key that uniquely determines a Dirichlet group.

TESTS::

    sage: DirichletGroup.create_key(60)
    (Cyclotomic Field of order 4 and degree 2, 60, None, None)

``base_ring`` is part of the key; if it were not, the keys of the two
groups below would compare equal and the caching would be broken::

    sage: k = DirichletGroup.create_key(2, base_ring=QQ); k
    (Rational Field, 2, None, None)
    sage: l = DirichletGroup.create_key(2, base_ring=CC); l
    (Complex Field with 53 bits of precision, 2, None, None)
    sage: k == l
    False
    sage: k = k[1:]; k
    (2, None, None)
    sage: l = l[1:]; l
    (2, None, None)
    sage: k == l
    True
    sage: G = DirichletGroup(2, base_ring=QQ)
    sage: G is DirichletGroup(2, base_ring=QQ)
    True
    sage: G is DirichletGroup(2, base_ring=CC)
    False

``create_object(version, key, **extra_args)``

Create the object from the key (extra arguments are ignored). This is
only called if the object was not found in the cache.

TESTS::

    sage: K = CyclotomicField(4)
    sage: DirichletGroup.create_object(None, (K, 60, K.gen(), 4))
    Group of Dirichlet characters modulo 60 with values in the group of order 4 generated by zeta4 in Cyclotomic Field of order 4 and degree 2
``kloosterman_sum(a=1, b=0)``

Return the "twisted" Kloosterman sum associated to this Dirichlet
character. This includes Gauss sums, classical Kloosterman sums, Salié
sums, etc. The Kloosterman sum associated to `\chi` and the integers
`a`, `b` is

.. MATH::

    K(a,b,\chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\,\zeta^{ar+br^{-1}},

where `m` is the modulus of `\chi` and `\zeta` is a primitive `m`-th
root of unity. This reduces to the Gauss sum if `b=0`.

This method performs an exact calculation and returns an element of a
suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`,
which gives an inexact answer (but is generally much quicker).

CACHING: Computed Kloosterman sums are *not* cached with this character.

EXAMPLES::

    sage: G = DirichletGroup(3)
    sage: e = G([-1])
    sage: e.kloosterman_sum(3,5)
    -2*zeta6 + 1
    sage: G = DirichletGroup(20)
    sage: e = G([1 for u in G.unit_gens()])
    sage: e.kloosterman_sum(7,17)
    -2*zeta20^6 + 2*zeta20^4 + 4

TESTS::

    sage: G = DirichletGroup(20, UniversalCyclotomicField())
    sage: e = G([1 for u in G.unit_gens()])
    sage: e.kloosterman_sum(7,17)
    -2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4

Over base rings in which the necessary roots of unity cannot be
constructed, the method raises ``NotImplementedError: Kloosterman sums
not implemented over this ring``.
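Setting `b = 0` should recover the Gauss sum; a sketch reusing the
modulus-3 character from the examples above (this assumes both methods
return values in the same cyclotomic field)::

    sage: e = DirichletGroup(3)([-1])
    sage: e.kloosterman_sum(1,0) == e.gauss_sum(1)
    True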
EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.gen(0)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
zeta = self._zeta
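# (If the cached zeta order cannot safely be reused, the DirichletGroup
# factory recomputes it as the order of R(zeta).)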
EXAMPLES::", "G = DirichletGroup(13) sage: G.galois_orbits() [ [Dirichlet character modulo 13", "rings.Integer(1) for g in self.gens(): ord *= int(g.order()) return ord", "values in Number Field in a with defining polynomial x^2", "is None or (X._zeta is not None and self.base_ring()(X._zeta) in", "((zeta6,), (zeta6,), -zeta6 + 3) ((zeta6,), (zeta6 - 1,), 2*zeta6", "return [self] P = self.parent() z = self.element() o =", "2] :: sage: chi = DirichletGroup(100151, CyclotomicField(10)).0 sage: ls =", "let's take a look at a non-prime modulus:: sage: N", "pickle values_on_gens_key = '_DirichletCharacter__values_on_gens' values_on_gens = None state_dict = state[1]", "be used to compute discrete logarithms in the value group", "and (self._zeta is None or (X._zeta is not None and", "sage: e = DirichletGroup(5, QQ).0 sage: f = DirichletGroup(5,CyclotomicField(4)).0 sage:", "None: zeta_order = zeta.multiplicative_order() elif zeta_order is not None: if", "of conductor 5 mapping 2 |--> -a^2] We can also", "b.maximize_base_ring().base_ring() Cyclotomic Field of order 4 and degree 2 sage:", "DirichletGroup(-33) Traceback (most recent call last): ... ValueError: modulus should", "b=0): r\"\"\" Return the \"twisted\" Kloosterman sum associated to this", "ring is a cyclotomic field or QQ.\") phi = K.complex_embedding(prec)", "sage: f.parent() Group of Dirichlet characters modulo 7 with values", "product of self and other. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20)", "(\\ZZ/N\\ZZ)^* \\to R^*, for some ring `R`, to the map", "# In particular, it is finitely generated; the added #", "either zeta or zeta_order is given, compute the other. if", "an element of %s\" % (x, self)) elif not x.conductor().divides(self.modulus()):", "Return the Jacobi sum associated to these Dirichlet characters (i.e.,", "True, cache answers - ``**opts`` -- optional arguments; not used", "k[1:]; k (2, None, None) sage: l = l[1:]; l", "the identity function in characteristic p. EXAMPLES:: sage: G =", "sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values() [0, 1, 36,", "- ``k`` -- a non-negative integer - ``algorithm`` -- either", "elif number_field.is_CyclotomicField(K) or is_RationalField(K): chi = chi.minimize_base_ring() n = lcm(m,", "`\\QQ(\\zeta_n)`, where `n` is the exponent of `(\\ZZ/N\\ZZ)^*`) - ``zeta``", "== 0: Auts = [e for e in range(1,n) if", "Group of Dirichlet characters modulo 7 with values in the", "zeta10^3 - zeta10^2 + zeta10 - 1, zeta10, zeta10^3 -", "R` obtained by sending those `x\\in\\ZZ/N\\ZZ` with `\\gcd(N,x)>1` to `0`.", "DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0 sage: chi.galois_orbit() Traceback (most recent", "character modulo 4 of conductor 4 mapping 3 |--> -1", "is a Dirichlet group. EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletGroup", "the base ring is a cyclotomic field or QQ.\") phi", "modulo } 16 \\hbox{ of conductor } 16 \\hbox{ mapping", "g == 1: g = 2 z = self.base_ring().zeta() n", "in range(r): if i != 0: s += ', '", "G.<a,b> = DirichletGroup(20) sage: a Dirichlet character modulo 20 of", "sum if `b=0`. This method performs an exact calculation and", "__future__ import print_function import sage.categories.all as cat from sage.misc.all import", "(-zeta6 + 1,), 1) ((zeta6 - 1,), (zeta6 - 1,),", "@cached_method def __eval_at_minus_one(self): r\"\"\" Efficiently evaluate the character at -1", "of conductor 1 mapping 11 |--> 1, 17 |--> 1", "|--> zeta12^2 sage: e.order() 12 sage: loads(e.dumps()) == e True", "raised. 
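    sage: # added numerical check: with the default b = 0 this reduces to
    sage: # the Gauss sum, whose absolute value here is sqrt(3)
    sage: G = DirichletGroup(3)
    sage: e = G.0
    sage: abs(e.kloosterman_sum_numerical())
    1.7320508075...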
EXAMPLES::

    sage: DirichletGroup(20).galois_orbits()
    [
    [Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> -zeta4, Dirichlet character modulo 20 of conductor 20 mapping 11 |--> -1, 17 |--> zeta4],
    ...,
    [Dirichlet character modulo 20 of conductor 1 mapping 11 |--> 1, 17 |--> 1]
    ]
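With ``reps_only=True`` only one representative per orbit is returned
(an added usage check, assuming the eight characters modulo 20 fall
into six orbits)::

    sage: len(DirichletGroup(20).galois_orbits(reps_only=True))
    6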
EXAMPLES::

    sage: len(DirichletGroup(20))
    8
    sage: len(DirichletGroup(20, QQ))
    4
    sage: len(DirichletGroup(20, GF(5)))
    8
EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.integers_mod()
    Ring of integers modulo 20
EXAMPLES::

    sage: DirichletGroup(20).decomposition()
    [
    Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2,
    Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2
    ]
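The moduli of the factors are exactly the prime powers in the
factorization of the modulus (a small added check)::

    sage: [H.modulus() for H in DirichletGroup(20).decomposition()]
    [4, 5]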
# Likewise, computing all binomial coefficients can be done
# much more cleverly.
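# (Sketch of that idea, for reference only: t/(e^t - 1) = sum_k B_k t^k / k!,
# so a single power-series inversion would yield every B_k up to k at once;
# the code below does not do this.)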
At present this is only implemented if the base ring has
characteristic 0 or a prime.
NotImplementedError: order of element", "(optional) root of unity in ``R`` - ``zeta_order`` -- (optional)", "True, False, True, False, True, False] sage: G = DirichletGroup(13,", "is fixed:: sage: G = DirichletGroup(1) sage: chi = G.one()", "[] ord = self.zeta_order() M = self._module zero = M(0)", "order 4 and degree 2 \"\"\" N = self.modulus() m", "of Dirichlet characters modulo 17 with values in the group", "we want the multiplication to take place. :: sage: G(d[0])*G(d[1])", "hand side. # Since p-1 is coprime to p, this", "sage: a.kernel() [1, 9, 13, 17] sage: b.kernel() [1, 11]", "with values in Complex Field with 53 bits of precision", "order 4 and degree 2 \"\"\" base_ring, modulus, zeta, zeta_order", "Dirichlet character modulo 5 of conductor 5 mapping 2 |-->", "root of unity. EXAMPLES:: sage: G = DirichletGroup(3) sage: e", "(which is generally slower). INPUT: - ``prec`` -- integer (default:", "positive') if base_ring is None: if not (zeta is None", "- <NAME> (2008-02-16): speed up __call__ method for Dirichlet characters,", "Cyclotomic Field of order 4 and degree 2 We can't", "Map): R = R.codomain() return DirichletGroup(self.modulus(), R, zeta=zeta, zeta_order=zeta_order) def", "over this ring \"\"\" G = self.parent() zo = G.zeta_order()", "a = kronecker_character(1) sage: b = DirichletGroup(2401,QQ)(a) # NOTE --", "the base ring is # finite, and hence this Dirichlet", "taken to be the group of roots of unity of", "x.conductor().divides(self.modulus()): raise TypeError(\"conductor must divide modulus\") a = [] for", "at 2. vals[0].append(vals[1][0]) del vals[1] elif self.modulus() % 4 ==", "5 of conductor 5 mapping 2 |--> -zeta4 AUTHORS: -", "G.unit_gens()]) def is_DirichletCharacter(x): r\"\"\" Return True if x is of", "sage: DirichletGroup(60,QQ).zeta() -1 sage: DirichletGroup(60, GF(25,'a')).zeta() 2 \"\"\" zeta =", "20 with values in Cyclotomic Field of order 4 and", "% self) return Z elif algorithm == 'lcalc': from sage.libs.lcalc.lcalc_Lfunction", "series # instead of calls to the Bernoulli function. Likewise", "The group of Dirichlet characters modulo `N` with values in", "of order 4 and degree 2, Group of Dirichlet characters", "R.<x> = PolynomialRing(QQ) sage: K.<a> = NumberField(x^4 + 1) sage:", "used to represent Dirichlet characters. TESTS:: sage: DirichletGroup(12)._module Vector space", "algorithm == \"definition\": # This is better since it computes", "-- (optional) positive integer; this must be the order of", "x in v: z = x.element() e = tuple(z) #", "self.zeta_order() R = self.base_ring() p = R.characteristic() if p ==", "DirichletGroup.create_object(None, l); H Group of Dirichlet characters modulo 2 with", "unity in the base ring. EXAMPLES:: sage: DirichletGroup(20).zeta_order() 4 sage:", "values in the group of order 2 generated by -1", "# The following code is pretty fast, at least compared", "(zeta is None and zeta_order is None): raise ValueError(\"zeta and", "non-field base rings not implemented\") # The automorphisms in characteristic", "} 1 \"\"\" s = r'\\hbox{Dirichlet character modulo } %s", "character modulo 2 of conductor 1 \"\"\" s = 'Dirichlet", "is_AlgebraicField(K): L = K zeta = L.zeta(m) elif number_field.is_CyclotomicField(K) or", "character modulo 60 of conductor 5 mapping 31 |--> 1,", "Dirichlet character modulo 35 of conductor 35 mapping 22 |-->", "Dirichlet character as an approximate complex number with prec bits", "If the base ring is not an integral domain, an", "a rational We test the case where `R` is a", "in the base ring. 
EXAMPLES::

    sage: DirichletGroup(37).zeta()
    zeta36
    sage: DirichletGroup(20).zeta()
    zeta4
    sage: DirichletGroup(60, QQ).zeta()
    -1
    sage: DirichletGroup(60, GF(25,'a')).zeta()
    2
EXAMPLES::

    sage: DirichletGroup(17)._automorphisms()
    [1, 3, 5, 7, 9, 11, 13, 15]
    sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms()
    [1, 11, 121, 1331]
    sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms()
    Traceback (most recent call last):
    ...
    NotImplementedError: Automorphisms for finite non-field base rings not implemented
# Since p-1 is coprime to p, the smallest r such that the
# divisibility holds equals Valuation(Order(x),p)+1.
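# (Worked instance: p = 3 and Order(x) = 6 give Valuation(6,3) + 1 = 2,
# so the conductor is 3**2 = 9.)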
#                  https://www.gnu.org/licenses/
# ****************************************************************************

from __future__ import print_function

import sage.categories.all as cat
ber = K.one()/2 if k == 1 else K(bernoulli(k))
.. WARNING::

    In the case of the trivial Dirichlet character modulo 1, this
    function returns `B_{1,\varepsilon} = 1/2`, in accordance with
    the above definition, but in contrast to the value `B_1 = -1/2`
    for the classical Bernoulli number.
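A quick comparison of the two conventions (added illustration;
``bernoulli`` is Sage's classical Bernoulli number)::

    sage: DirichletGroup(1)[0].bernoulli(1)
    1/2
    sage: bernoulli(1)
    -1/2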
TypeError: conductor must divide modulus
sage: H = DirichletGroup(16,
TESTS::", "DirichletGroup(20).gen(0) sage: e.values() [0, 1, 0, -1, 0, 0, 0,", "-- (optional) order of ``zeta`` EXAMPLES:: sage: G = DirichletGroup(7,QQ);", "3]~, Vecsmall([3, 3, 1])], [[8, 8, 3], [[1, matrix(0,2)], [1,", "return [R.zero(), R.one()] result_list = [R.zero()] * mod gens =", "G.unit_gens()]) def kronecker_character_upside_down(d): \"\"\" Return the quadratic Dirichlet character (./d)", "Residue field of Fractional ideal (-2*zeta4 + 5) :: sage:", "(zeta6 - 1,), 2*zeta6 + 1) ((zeta6,), (-1,), -2*zeta6 -", "self.zeta_order() M = self._module zero = M(0) orders = self.integers_mod().unit_group().gens_orders()", "and degree 2 \"\"\" N = self.modulus() m = m", "QQ! sage: b.modulus() 2401 AUTHORS: - <NAME> (2006-08-06) \"\"\" d", "Return the product of self and other. EXAMPLES:: sage: G.<a,b>", "of unity with smaller order than expected (:trac:`6018`):: sage: G", "call this (unless `m` equals -1) EXAMPLES:: sage: G =", "that the root of unity can change:: sage: H.zeta() zeta6", "20 of conductor 1 mapping 11 |--> 1, 17 |-->", "doctest \\hbox{Dirichlet character modulo } 16 \\hbox{ of conductor }", "large power of the image root of unity. We use", "of conductor 13 mapping 2 |--> zeta12 sage: e.galois_orbit() [Dirichlet", "not None: # A root of unity was explicitly given;", "sage: f = K.complex_embeddings()[0] sage: D = DirichletGroup(5, K) sage:", "equivalently its `n`-torsion subgroup, where `n` is the exponent of", "Ring of integers modulo 15 sage: G.order() 4 sage: DirichletGroup(-33)", "and the orbits themselves (slightly faster if False). - ``check``", "be from the same Dirichlet Group.\") return sum([self(x) * char(1-x)", "rings.Integer(zeta_order) zeta = base_ring.zeta(zeta_order) return (base_ring, modulus, zeta, zeta_order) def", "an element of ``ZZ``. TESTS:: sage: G = DirichletGroup(7, base_ring=Integers(9),", "of conductor 1 mapping 11 |--> 1, 17 |--> 1]", "of conductor 5 mapping 11 |--> 1, 17 |--> -1", "TypeError(\"conductor must divide modulus\") a = [] for u in", "2, 0, 1, 2, 0, 0, 2, 0, 1, 2,", "import multiplicative_iterator from sage.structure.parent import Parent from sage.structure.sequence import Sequence", "character. EXAMPLES:: sage: G = DirichletGroup(3) sage: e = G([-1])", "zeta4 We next compute several invariants of ``G``:: sage: G.gens()", "many cases, # especially since we end up computing all", "self._list_from_iterator() def modulus(self): \"\"\" Returns the modulus of self. EXAMPLES::", "group of n-th roots of unity in the base ring", "a given r, whether or not the above divisibility holds", "|--> zeta4 sage: e.bar() Dirichlet character modulo 5 of conductor", "while True: try: exponents[i] += 1 except IndexError: # Done!", "R): \"\"\" Return the base extension of ``self`` to ``R``.", "inverse of self. EXAMPLES:: sage: e = DirichletGroup(13).0 sage: f", "G.one() sage: chi.gauss_sum() 1 .. SEEALSO:: - :func:`sage.arith.misc.gauss_sum` for general", "self(1) for i in range(self.ngens()): g = self.gen(i) n =", "size 5 ] \"\"\" R = self.base_ring() return Sequence([DirichletGroup(p**r,R) for", "are *not* cached with this character. 
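    sage: # the table inverts _zeta_powers: the i-th power of zeta maps to i
    sage: DirichletGroup(5)._zeta_dlog
    {-1: 2, -zeta4: 3, 1: 0, zeta4: 1}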
The resulting parent (testable with ``is_DirichletGroup``; its elements,
of type :class:`DirichletCharacter`, with ``is_DirichletCharacter``)
records the modulus, the ring of integers modulo `N`, and a distinguished
set of generators dual to the minimal generators ``unit_gens()`` of
`(\ZZ/N\ZZ)^*`::

    sage: G = DirichletGroup(20); G
    Group of Dirichlet characters modulo 20 with values in Cyclotomic Field of order 4 and degree 2
    sage: G.modulus()
    20
    sage: G.integers_mod()
    Ring of integers modulo 20
    sage: DirichletGroup(37).unit_gens()
    (2,)
    sage: DirichletGroup(20).unit_gens()
    (11, 17)
    sage: DirichletGroup(60).unit_gens()
    (31, 41, 37)
    sage: G.gens()
    (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
    sage: G.ngens()
    2
    sage: G.gen(0)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: G.gen(2)
    Traceback (most recent call last):
    ...
    IndexError: n(=2) must be between 0 and 1
    sage: G.gen(-1)
    Traceback (most recent call last):
    ...
    IndexError: n(=-1) must be between 0 and 1

The order of the group is its number of elements, the same as ``len``,
and depends on the base ring::

    sage: DirichletGroup(20).order()
    8
    sage: DirichletGroup(37).order()
    36
    sage: len(DirichletGroup(20, QQ))
    4
    sage: len(DirichletGroup(20, GF(5)))
    8
    sage: len(DirichletGroup(20, GF(2)))
    1
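Since the generators are independent, the group order factors as the
product of their orders; a supplementary check (not an original doctest)::

    sage: G = DirichletGroup(20)
    sage: G.order() == prod(g.order() for g in G.gens())
    True
    sage: len(list(G)) == G.order()
    True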
Each group stores a chosen root of unity and its order, together with the
list of its powers and a dictionary of discrete logarithms used to
evaluate characters quickly::

    sage: DirichletGroup(37).zeta()
    zeta36
    sage: DirichletGroup(20).zeta()
    zeta4
    sage: DirichletGroup(20).zeta_order()
    4
    sage: DirichletGroup(60).zeta_order()
    4
    sage: DirichletGroup(60, GF(25,'a')).zeta_order()
    4
    sage: DirichletGroup(5)._zeta_powers
    [1, zeta4, -1, -zeta4]
    sage: DirichletGroup(5)._zeta_dlog
    {-1: 2, ...}

The exponent of the group also depends on the base ring::

    sage: DirichletGroup(20).exponent()
    4
    sage: DirichletGroup(20, GF(3)).exponent()
    2
    sage: DirichletGroup(20, GF(2)).exponent()
    1

Galois orbits are computed through the automorphisms `x \mapsto x^e` of
the cyclic group of values.  These are always given by the exponents `e`
coprime to the order `n` of ``zeta`` in characteristic zero, and by the
powers `p, p^2, \ldots, p^{r-1}` of the characteristic `p` otherwise,
where `r` is the multiplicative order of `p` modulo `n`::

    sage: DirichletGroup(17)._automorphisms()
    [1, 3, 5, 7, 9, 11, 13, 15]
    sage: DirichletGroup(17, GF(11^4, 'a'))._automorphisms()
    [1, 11, 121, 1331]
    sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms()
    Traceback (most recent call last):
    ...
    NotImplementedError: Automorphisms for finite non-field base rings not implemented
    sage: G = DirichletGroup(13)
    sage: G.galois_orbits()
    [
    [Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1],
    ...,
    [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -1]
    ]

``galois_orbits(v=None, reps_only=False, sort=True, check=True)`` can also
restrict to a list ``v`` of characters, or return only one representative
per orbit (slightly faster).  Galois orbits are only defined if the base
ring is an integral domain.

Base extension of a whole group goes through the same factory, so
base-extended Dirichlet groups do not silently get roots of unity with
smaller order than expected (:trac:`6018`)::

    sage: G = DirichletGroup(10, QQ).base_extend(CyclotomicField(4))
    sage: H = DirichletGroup(10, CyclotomicField(4))
    sage: G is H
    True
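The orbits partition the group, so their sizes add up to the group order;
a supplementary check (not an original doctest)::

    sage: G = DirichletGroup(20)
    sage: sum(len(orbit) for orbit in G.galois_orbits()) == G.order()
    True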
A :class:`DirichletCharacter` is stored in one of two equivalent forms:
the tuple ``values_on_gens()`` of its values on the standard generators
``G.unit_gens()`` of `(\ZZ/N\ZZ)^*`, or the vector ``element()`` of the
discrete logarithms of those values with respect to ``G.zeta()``.

.. NOTE::

    The constructor sets the cache of :meth:`element` or of
    :meth:`values_on_gens`, depending on the type of the input; at least
    one of these methods needs to be set for the character to work
    properly.  These caches have to be stored when pickling, and
    ``__setstate__`` also recognizes the explicit caches ``__element`` and
    ``__values_on_gens`` used by older pickles.  Please do not change the
    entries of the vector returned by :meth:`element`; it is mutable
    *only* because immutable vectors are not implemented yet.

With ``check=True``, the orders of the given values (equivalently, the
additive orders of their discrete logarithms) are verified::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.element()
    (2, 0)
    sage: b.element()
    (0, 1)
    sage: k.<i> = CyclotomicField(4)
    sage: G = DirichletGroup(192)
    sage: G([i, -1, -1])
    Traceback (most recent call last):
    ...
    ValueError: values (= (4, 8, 8) modulo 16) must have additive orders dividing (2, 16, 2), respectively

Characters are hashed and compared via ``values_on_gens()``, so characters
of different moduli are unequal, even if they define identical functions
on ``ZZ``::

    sage: e = DirichletGroup(16)([-1, 1])
    sage: hash(e) == hash((-1, 1))
    True
    sage: k = DirichletGroup(7)([-1])
    sage: k == e
    False
    sage: trivial_character(3) == trivial_character(9)
    False

Copies are shallow::

    sage: G.<a> = DirichletGroup(11)
    sage: b = copy(a)
    sage: a is b
    False
    sage: a.element() is b.element()
    False
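The two representations are linked by ``zeta``: raising the stored root of
unity to each entry of ``element()`` recovers ``values_on_gens()``.  A
supplementary check (not an original doctest)::

    sage: G.<a,b> = DirichletGroup(20)
    sage: z = G.zeta()
    sage: tuple(z**int(k) for k in a.element()) == a.values_on_gens()
    True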
Calling a character at `m` reduces `m` modulo `N` and looks the result up
in ``values()``, the cached list of the values of the character on each
integer between 0 and `N-1`::

    sage: e = DirichletGroup(20).gen(0)
    sage: e.values()
    [0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1]
    sage: e(4)
    0
    sage: e(13)
    1

The list is filled in a single pass: non-units get `0`, and the units are
enumerated in "odometer" fashion over the exponents of the unit
generators, increasing the first exponent by one at each step (carrying
into the later ones) while multiplying a running product by the
corresponding precomputed value.  This avoids recomputing powers and
scales to very large moduli.
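A supplementary consistency check (not an original doctest) that
evaluation agrees with the cached list::

    sage: e = DirichletGroup(20).gen(0)
    sage: e.values() == [e(m) for m in range(20)]
    True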
The value at `-1` is computed by a dedicated helper, since evaluating the
character there directly, using discrete logarithms and a large power of
the image root of unity, is potentially much more expensive.  We use the
following.

Proposition: Suppose eps is a character mod `p^n`, where `p` is a prime.
Then `\varepsilon(-1) = -1` if and only if `p = 2` and the factor of eps
at 4 is nontrivial, or `p > 2` and 2 does not divide
`\phi(p^n)/\mbox{\rm ord}(\varepsilon)`.

Concretely, the implementation multiplies the contributions of the factors
in ``decomposition()``: for a factor of modulus divisible by 4, the value
on the first generator (which corresponds to `-1` for a 2-power modulus);
for an odd prime-power factor, a sign `-1` whenever
`\phi(p^n)/\mbox{\rm ord}(\varepsilon)` is odd.

A character is *even* if `\varepsilon(-1) = 1` and *odd* if
`\varepsilon(-1) = -1`.  Over an inexact base ring the comparison is
numerical; for instance ``is_even`` tests ``abs(self(-1) - R(1)) < 0.5``::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.is_even()
    False
    sage: e(-1)
    -1
    sage: [e.is_even() for e in G]
    [True, False, True, False, True, False, True, False, True, False, True, False]
    sage: [e.is_odd() for e in G]
    [False, True, False, True, False, True, False, True, False, True, False, True]
    sage: G = DirichletGroup(100000, CC)
    sage: G.1.is_even()
    True
    sage: G.0.is_odd()
    True

Note that ``is_even`` need not be the negation of ``is_odd``: in
characteristic 2 we have `1 = -1`, so a character can be both even and
odd there.
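A supplementary check (not an original doctest) that the parity predicates
match the value at `-1` over an exact ring::

    sage: G = DirichletGroup(13)
    sage: all(e(-1) == (1 if e.is_even() else -1) for e in G)
    True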
Every character decomposes as a product of characters of prime-power
modulus, one for each prime dividing the modulus.  (If the modulus is
congruent to 2 mod 4, the factor at 2 is a character of `(\ZZ/2\ZZ)^*`,
which is the trivial group.)  The conductor is multiplicative over this
decomposition.  For a nontrivial character of odd prime-power modulus
`p^n`, the conductor is the smallest `p^r` such that the order of the
character divides `\phi(p^r) = p^{r-1}(p-1)`; since `p-1` is coprime to
`p`, for a given `r` whether or not this divisibility holds depends only
on the factor `p^{r-1}`, so the smallest such `r` equals
`v_p(\mbox{\rm ord}(\varepsilon)) + 1`.  For `p = 2`, the conductor picks
up an extra factor of 2 when the modulus is divisible by 8 and the value
on the second generator is nontrivial.  A character is *primitive* if its
conductor equals its modulus; ``is_trivial`` tests whether it is the
trivial character. ::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.conductor()
    4
    sage: b.conductor()
    5
    sage: (a*b).conductor()
    20
    sage: c = a*b
    sage: d = c.decomposition(); d
    [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1, Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]
    sage: d[1].parent()
    Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2

The factors live in different Dirichlet groups, and coercion of one factor
into the parent of the other fails in both directions, so they cannot be
multiplied back together directly; we can multiply if we are explicit
about where the multiplication should take place::

    sage: d[0]*d[1] == c
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand parent(s) for *: 'Group of Dirichlet characters modulo 4 with values in Cyclotomic Field of order 4 and degree 2' and 'Group of Dirichlet characters modulo 5 with values in Cyclotomic Field of order 4 and degree 2'
    sage: G(d[0])*G(d[1]) == c
    True

Related invariants and associated characters::

    sage: a.kernel()
    [1, 9, 13, 17]
    sage: b.kernel()
    [1, 11]
    sage: a.is_primitive()
    False
    sage: b.is_primitive()
    False
    sage: (a*b).is_primitive()
    True
    sage: e = DirichletGroup(100, QQ).0
    sage: e.modulus()
    100
    sage: e.conductor()
    4
    sage: f = e.primitive_character()
    sage: f.modulus()
    4
    sage: e = DirichletGroup(100).1
    sage: e.order()    # same as multiplicative_order, since the group is multiplicative
    20
    sage: e.multiplicative_order()
    20

``restrict(M)`` moves a character to a modulus `M` between its conductor
and its modulus; ``extend(M)`` moves it to a multiple `M` of its modulus::

    sage: H.<c> = DirichletGroup(4)
    sage: c.extend(20)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: c.extend(20) == a
    True
    sage: e = DirichletGroup(100, QQ).0
    sage: e.restrict(20)
    Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: e.restrict(7)
    Traceback (most recent call last):
    ...
    ValueError: conductor(=4) must divide M(=7)
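Two supplementary checks (not original doctests): the conductor is the
product of the conductors of the prime-power factors, and extending the
associated primitive character recovers the original::

    sage: G.<a,b> = DirichletGroup(20)
    sage: c = a*b
    sage: c.conductor() == prod(e.conductor() for e in c.decomposition())
    True
    sage: a.primitive_character().extend(20) == a
    True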
Characters can also be moved to a different value ring.  ``change_ring(R)``
accepts either a ring admitting a *coercion* map from the base ring of
``self``, or a ring homomorphism with the base ring of ``self`` as its
domain; ``base_extend(R)`` (in contrast to ``change_ring``) requires a
coercion map to exist::

    sage: e = DirichletGroup(7, QQ).0
    sage: f = e.change_ring(QuadraticField(3, 'a'))
    sage: f.parent()
    Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878?
    sage: f = DirichletGroup(17, ZZ, zeta=-1).0
    sage: g = f.base_extend(Integers(15))
    sage: g.parent().zeta()
    14
    sage: DirichletGroup(7, QQ).base_extend(ZZ)
    Traceback (most recent call last):
    ...
    TypeError: no coercion map from Rational Field to Integer Ring is defined

A complex embedding works as well::

    sage: K.<i> = QuadraticField(-1)
    sage: chi = DirichletGroup(5, K)[1]
    sage: chi(2)
    i
    sage: f = K.complex_embeddings()[0]
    sage: psi = chi.change_ring(f)
    sage: psi(2)
    -1.83697019872103e-16 - 1.00000000000000*I

``minimize_base_ring`` returns an equal Dirichlet character defined over
as small a subfield (or subring) of the base ring as possible.

.. note::

    This function is currently only implemented when the base ring is a
    number field; it is the identity function in characteristic `p`.

::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.minimize_base_ring().base_ring()
    Cyclotomic Field of order 12 and degree 4
    sage: (e^2).minimize_base_ring().base_ring()
    Cyclotomic Field of order 6 and degree 2
    sage: K.<a,b> = NumberField([x^2 + 1, x^2 - 3])
    sage: chi = DirichletGroup(7, K).0
    sage: chi.minimize_base_ring()
    Dirichlet character modulo 7 of conductor 7 mapping 3 |--> -1/2*b*a + 1/2

``maximize_base_ring`` goes the other way: for
`\varepsilon : (\ZZ/N\ZZ)^* \to \QQ(\zeta_n)` it returns an equal
character with values in `\QQ(\zeta_m)`, where `m` is the least common
multiple of `n` and the exponent of `(\ZZ/N\ZZ)^*`.
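A supplementary check (not an original doctest) that ``change_ring``
preserves the values while changing the parent::

    sage: e = DirichletGroup(7, QQ).0
    sage: f = e.change_ring(CyclotomicField(4))
    sage: f.parent().base_ring()
    Cyclotomic Field of order 4 and degree 2
    sage: f.values_on_gens() == e.values_on_gens()
    True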
For a character `\chi` modulo `m` and an integer `a`, the Gauss sum is

.. MATH::

    g_a(\chi) = \sum_{r \in \ZZ/m\ZZ} \chi(r)\, \zeta^{ar},

where `\zeta` is a primitive `m^{th}` root of unity.

FACTS: If the modulus is a prime `p` and the character is nontrivial,
then the Gauss sum has absolute value `\sqrt{p}`.

``gauss_sum(a=1)`` performs an exact calculation and returns an element of
a suitable cyclotomic field; it is implemented when the base ring is a
cyclotomic field, ``QQ``, ``QQbar``, or a complex field.
``gauss_sum_numerical(prec=53, a=1)`` instead returns an approximate
complex number with ``prec`` *bits* of precision, which is generally much
quicker.

CACHING: Computed Gauss sums are *not* cached with this character.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: H = DirichletGroup(13, CC)
    sage: e = G.0
    sage: f = H.0
    sage: abs(e.gauss_sum_numerical())
    3.60555127546...
    sage: abs(f.gauss_sum_numerical())
    3.60555127546...
    sage: sqrt(13.0)
    3.60555127546399

The field of algebraic numbers is supported (:trac:`19056`)::

    sage: G = DirichletGroup(7, QQbar)
    sage: G[1].gauss_sum()
    -2.440133358345538? + 1.022618791871794?*I
    sage: G[1].gauss_sum_numerical()
    -2.44013335834554 + 1.02261879187179*I

.. SEEALSO::

    - :func:`sage.arith.misc.gauss_sum` for general fields
    - :func:`sage.rings.padics.misc.gauss_sum` for a `p`-adic version
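A supplementary numerical check (not an original doctest) of the
`\sqrt{p}` fact for all nontrivial characters modulo 13::

    sage: G = DirichletGroup(13)
    sage: all(abs(abs(chi.gauss_sum_numerical()) - sqrt(13.0)) < 1e-10
    ....:     for chi in G if not chi.is_trivial())
    True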
The Jacobi sum of two characters `\chi` and `\psi` modulo `N` is defined
as

.. MATH::

    J(\chi, \psi) = \sum_{a \in \ZZ/N\ZZ} \chi(a) \psi(1 - a),

and ``jacobi_sum(char, check=True)`` computes exactly this sum.  Both
characters must be from the same Dirichlet group::

    sage: D = DirichletGroup(13)
    sage: e = D.0
    sage: f = D[-2]
    sage: e.jacobi_sum(f)
    3*zeta12^2 + 2*zeta12 - 3
    sage: f.jacobi_sum(e)
    3*zeta12^2 + 2*zeta12 - 3
    sage: p = 7
    sage: DP = DirichletGroup(p)
    sage: f = DP.0
    sage: e.jacobi_sum(f)
    Traceback (most recent call last):
    ...
    NotImplementedError: Characters must be from the same Dirichlet Group.

Jacobi sums with trivial characters count the units `a` for which `1 - a`
is also a unit::

    sage: N = 9
    sage: D = DirichletGroup(N)
    sage: g = D(1)
    sage: g.jacobi_sum(g)
    3
    sage: N = 13
    sage: D = DirichletGroup(N)
    sage: g = D(1)
    sage: g.jacobi_sum(g)
    11
    sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
    11

Sums where exactly one character is nontrivial also come out correctly
(:trac:`6393`)::

    sage: G = DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]
    sage: Y.jacobi_sum(Z)
    -1
    sage: Z.jacobi_sum(Y)
    -1
    sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)])
    -1
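A supplementary check (not an original doctest) of the defining sum for a
nontrivial character modulo 20::

    sage: G.<a,b> = DirichletGroup(20)
    sage: chi = a*b
    sage: chi.jacobi_sum(chi) == sum(chi(x)*chi(1-x) for x in IntegerModRing(20))
    True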
The Kloosterman sum associated to `\chi` and the integers `a`, `b` is

.. MATH::

    K(a, b, \chi) = \sum_{r \in (\ZZ/m\ZZ)^\times} \chi(r)\, \zeta^{ar + br^{-1}},

where `m` is the modulus of `\chi` and `\zeta` is a primitive `m^{th}`
root of unity.  This reduces to the Gauss sum if `b = 0`.
``kloosterman_sum(a=1, b=0)`` performs an exact calculation and returns an
element of a suitable cyclotomic field; Kloosterman sums are only
currently implemented when the base ring is a cyclotomic field or ``QQ``.
``kloosterman_sum_numerical(prec=53, a=1, b=0)`` returns an approximate
complex number with ``prec`` bits of precision instead, which is generally
much quicker.

CACHING: Computed Kloosterman sums are *not* cached with this character.

EXAMPLES::

    sage: G = DirichletGroup(3)
    sage: e = G([-1])
    sage: e.kloosterman_sum(3,5)
    -2*zeta6 + 1
    sage: G = DirichletGroup(20)
    sage: e = G.1
    sage: e.kloosterman_sum_numerical(53,3,11)
    3.80422606518061 - 3.80422606518061*I
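Substituting `r \mapsto r^{-1}` in the defining sum shows that
`K(a, b, \chi) = K(b, a, \chi^{-1})`, so the sum is symmetric in `a` and
`b` for a real-valued character.  A supplementary check (not an original
doctest)::

    sage: G = DirichletGroup(3)
    sage: e = G([-1])
    sage: e.kloosterman_sum(5,3) == e.kloosterman_sum(3,5)
    True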
Let `\varepsilon` be a (not necessarily primitive) character of modulus
`N`.  ``bernoulli(k)`` returns the generalized Bernoulli number
`B_{k,\varepsilon}`, as defined by the following identity of power series:

.. MATH::

    \sum_{a=1}^{N} \varepsilon(a) \frac{t e^{at}}{e^{Nt} - 1}
    = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k.

INPUT:

- ``k`` -- a non-negative integer

- ``algorithm`` -- either ``'recurrence'`` (default) or ``'definition'``

- ``cache`` -- if ``True``, cache answers

- ``**opts`` -- optional arguments; not used directly, but passed to the
  :func:`bernoulli` function if this is called

ALGORITHM: The ``'recurrence'`` algorithm computes generalized Bernoulli
numbers via classical Bernoulli numbers, using the formula in [Coh2007]_,
Section 9.4.1; the code is fairly fast, although it ends up computing all
the classical Bernoulli numbers up to `k` along the way.  The
``'definition'`` algorithm expands the defining power series directly,
setting `g = t/(e^{Nt} - 1)` and `h_a = g \cdot e^{at}` to precision
`k + 2` and returning `k! \sum_a \varepsilon(a) [t^k] h_a`; it computes
the same thing, but requires no arithmetic in a polynomial ring over a
number field.

Note that for the trivial Dirichlet character modulo 1, this function
returns `B_{1,\varepsilon} = 1/2`, in contrast to the value `B_1 = -1/2`
of the classical Bernoulli number.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.bernoulli(5) == e.bernoulli(5, algorithm='definition', cache=False)
    True
    sage: DirichletGroup(1)[0].bernoulli(1)
    1/2
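A supplementary check (not an original doctest): for the trivial character
modulo 1, the generalized Bernoulli numbers agree with the classical ones,
up to the sign of `B_1`::

    sage: chi = DirichletGroup(1)[0]
    sage: [chi.bernoulli(k) for k in range(1, 5)]
    [1/2, 1/6, 0, -1/30]
    sage: [bernoulli(k) for k in range(1, 5)]
    [-1/2, 1/6, 0, -1/30]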
The Galois orbit of a single character is its orbit under the absolute
Galois group of the prime subfield of the base ring, acting on values;
``galois_orbit(sort=True)`` returns it as a sorted list::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e
    Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12
    sage: e.galois_orbit()
    [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12,
     ...,
     Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12]
    sage: e = G.0^2; e
    Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2
    sage: e.galois_orbit()
    [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2,
     Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]

A character of order at most 2 is fixed by the Galois action, so its
orbit is a singleton.  As with the group-level method, Galois orbits are
only defined if the base ring is an integral domain; trying to compute
them over a ring such as ``Integers(9)`` raises a ``TypeError``.
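Over the default cyclotomic base ring, the Galois action permutes the
powers `\chi^k` with `k` coprime to the order of `\chi`, so the orbit of
`\chi` has `\phi(\mbox{\rm ord}(\chi))` elements and contains `\chi`
itself.  A supplementary check (not an original doctest)::

    sage: e = DirichletGroup(13).0
    sage: orb = e.galois_orbit()
    sage: e in orb
    True
    sage: len(orb) == euler_phi(e.order())
    True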
Conversion into a Dirichlet group (``_element_constructor_``) accepts the
integer 1 (the trivial character), a tuple or list of ring elements giving
the values on each unit generator, a vector of exponents, or a Dirichlet
character of compatible modulus.  When a character ``x`` of a different
modulus is converted, its values are read off at lifts of the unit
generators, shifted to stay coprime to both moduli::

    a = []
    for u in self.unit_gens():
        v = u.lift()
        # have to do this, since e.g., unit gens mod 11 are not units mod 22
        while x.modulus().gcd(v) != 1:
            v += self.modulus()
        a.append(R(x(v)))
    return self.element_class(self, a)

``_coerce_map_from_(X)`` declares when there is a coercion map from
another Dirichlet group `X`; there is conversion between Dirichlet groups
whenever the moduli are compatible::

    sage: G = DirichletGroup(13)
    sage: G(1)
    Dirichlet character modulo 13 of conductor 1 mapping 2 |--> 1
    sage: G = DirichletGroup(6)
    sage: G(DirichletGroup(3).0)
    Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1
    sage: a = DirichletGroup(7, QQ).0
    sage: b = DirichletGroup(2401,QQ)(a) # NOTE -- over QQ!
    sage: b.modulus()
    2401
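A supplementary check (not an original doctest) that conversion preserves
the values on the common units::

    sage: chi3 = DirichletGroup(3).0
    sage: chi6 = DirichletGroup(6)(chi3)
    sage: all(chi6(x) == chi3(x) for x in range(6) if gcd(x, 6) == 1)
    True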
Characters carry a Conrey number, the unique integer coprime to `q` that
identifies a Dirichlet character of modulus `q` in the Conrey labelling
scheme used by the LMFDB; it is computed via PARI's ``znconreyexp``::

    sage: chi = DirichletGroup(24)([1,-1,-1]); chi
    Dirichlet character modulo 24 of conductor 24 mapping 7 |--> 1, 13 |--> -1, 17 |--> -1
    sage: chi.conrey_number()
    5

TESTS::

    sage: eps1 = DirichletGroup(5)([-1])
    sage: eps2 = DirichletGroup(5,QQ)([-1])
    sage: eps1.conrey_number() == eps2.conrey_number()
    True

``lmfdb_page()`` opens the LMFDB web page of the character in a browser;
internally it does nothing more than ::

    import webbrowser
    lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'
    url = lmfdb_url.format(self.modulus(), self.conrey_number())
    webbrowser.open(url)

``lfunction(prec=53, algorithm=None)`` returns the `L`-function of the
character, by default via PARI::

    sage: G.<a,b> = DirichletGroup(20)
    sage: L = a.lfunction(); L
    PARI L-function associated to Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: a = a.primitive_character()
    sage: L = a.lfunction(algorithm='lcalc'); L
    L-function with complex Dirichlet coefficients
    sage: L.value(4) # abs tol 1e-14
    0.988944551741105 - 5.16608739123418e-18*I
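The LMFDB label can be assembled without opening a browser; a
supplementary illustration (not an original doctest) using the same URL
template as ``lmfdb_page``::

    sage: chi = DirichletGroup(24)([1,-1,-1])
    sage: 'https://www.lmfdb.org/Character/Dirichlet/{}/{}'.format(chi.modulus(), chi.conrey_number())
    'https://www.lmfdb.org/Character/Dirichlet/24/5'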
sage: sqrt(13.0) 3.60555127546399 TESTS: The field of algebraic numbers", "- 4*E(5)^3 - 2*E(5)^4 sage: G = DirichletGroup(12, QQbar) sage:", "|--> 1 sage: b^2 Dirichlet character modulo 20 of conductor", "e.values() [0, 1, 0, -1, 0, 0, 0, -1, 0,", "using dlog and a large power of the image root", "sage: G.gens() (Dirichlet character modulo 20 of conductor 4 mapping", "n<0 or n>=len(g): raise IndexError(\"n(=%s) must be between 0 and", "17 |--> -1], ..., [Dirichlet character modulo 20 of conductor", "modulus of self. EXAMPLES:: sage: DirichletGroup(37).unit_gens() (2,) sage: DirichletGroup(20).unit_gens() (11,", "sage: G = DirichletGroup(20) sage: e = G.1 sage: e.kloosterman_sum_numerical(53,3,11)", "of conductor 5 mapping 2 |--> -zeta4] \"\"\" return self._list_from_iterator()", "zeta or zeta_order is given, compute the other. if zeta", "13 of conductor 13 mapping 2 |--> -zeta12] sage: e", "NOTE:: The constructor of :class:`DirichletCharacter` sets the cache of :meth:`element`", "= CC.coerce_map_from(K) elif number_field.is_CyclotomicField(K) or is_RationalField(K): phi = K.complex_embedding(prec) CC", "for the characters in this group (default: the cyclotomic field", "z = zero.__copy__() z[i] = ord//gcd(ord, orders[i]) g.append(self.element_class(self, z, check=False))", "QQ)) 4 sage: len(DirichletGroup(20, GF(5))) 8 sage: len(DirichletGroup(20, GF(2))) 1", "be a multiple of the modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring())", "sage.misc.fast_methods import WithEqualityById from sage.structure.element import MultiplicativeGroupElement from sage.structure.gens_py import", "self).__setstate__(state) if values_on_gens is not None: self.values_on_gens.set_cache(values_on_gens) if element is", "chi.change_ring(f) sage: psi(2) -1.83697019872103e-16 - 1.00000000000000*I \"\"\" if self.base_ring() is", "base ring is a cyclotomic field or QQ.\") phi =", "v = [dlog[x] for x in values_on_gens] m = P.zeta_order()", "the multiplicative inverse of self. EXAMPLES:: sage: e = DirichletGroup(13).0", "= 0 i += 1 @cached_method(do_pickle=True) def values_on_gens(self): r\"\"\" Return", "group of order 2 generated by -1 in Number Field", "order = self._zeta_order if order is None: order = self.zeta().multiplicative_order()", "with a = 1.732050807568878? :: sage: e = DirichletGroup(13).0 sage:", "4 and degree 2' We can multiply if we're explicit", "-- ignored (needed so ``G.<...> = DirichletGroup(...)`` notation works) -", "of order dividing ``zeta_order`` in `R`. 
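As a further illustration of the `\sqrt{p}` fact (a sketch of mine rather than a doctest from the source; it uses only the calls shown above), one can compare the numerical Gauss sum against `\sqrt{p}` for a few primes::

    sage: for p in [5, 11, 17]:
    ....:     chi = DirichletGroup(p).0   # a nontrivial character mod p
    ....:     print(abs(abs(chi.gauss_sum_numerical()) - sqrt(RR(p))) < 1e-10)
    True
    True
    True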
The group `V` of values is determined as follows:

- If ``zeta`` is specified, then `V` is taken to be the cyclic subgroup of `R^*` generated by ``zeta``. If ``zeta_order`` is also given, it must be the multiplicative order of ``zeta``; this is useful if the base ring is not exact or if the order of ``zeta`` is very large.

- If ``zeta`` is not specified but ``zeta_order`` is, then `V` is taken to be the group of roots of unity of order dividing ``zeta_order`` in `R`. In this case, `R` must be a domain (so `V` is cyclic), and `V` must have order ``zeta_order``.

- If both ``zeta`` and ``zeta_order`` are omitted, the default base ring is the cyclotomic field `\QQ(\zeta_n)`, where `n` is the exponent of `(\ZZ/N\ZZ)^*`.

The method ``galois_orbits`` returns a list of the Galois orbits of Dirichlet characters in ``self``, or in ``v`` if that argument is given.

INPUT:

- ``v`` -- (optional) list of elements of ``self``

- ``reps_only`` -- (optional: default ``False``) if ``True``, only return representatives for the orbits

- ``sort`` -- (optional: default ``True``) whether to sort the list of orbits and the orbits themselves (slightly faster if ``False``)

- ``check`` -- (optional, default: ``True``) whether or not to explicitly coerce each element of ``v`` into ``self``

Galois orbits are only defined if the base ring is an integral domain::

    sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits()
    Traceback (most recent call last):
    ...
    TypeError: Galois orbits only defined if base ring is an integral domain
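A small sketch of a single orbit, as seen below (the two conjugate characters of order 6 mod 13; the exact printed order of the list is as I recall it and may differ)::

    sage: G = DirichletGroup(13)
    sage: e = G.0^2
    sage: e.galois_orbit()
    [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12^2,
     Dirichlet character modulo 13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]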
.. note::

    This function is currently only implemented when the base ring is a cyclotomic field or `\QQ`.

``kronecker_character(d)`` returns the quadratic Dirichlet character `(d/.)` of minimal conductor, built from the fundamental discriminant of `d` (which must be nonzero), while ``kronecker_character_upside_down(d)`` returns the character `(./d)` of conductor `d`, for `d > 0`. A Kronecker character can be coerced into a Dirichlet group of larger modulus::

    sage: a = kronecker_character(1)
    sage: b = DirichletGroup(2401,QQ)(a)   # NOTE -- over QQ!
    sage: b.modulus()
    2401
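The Jacobi sum `J(\chi, \psi) = \sum_{a \in \ZZ/N\ZZ} \chi(a)\,\psi(1-a)` can be checked directly against its definition; for the trivial character mod 13 it simply counts the residues `a \neq 0, 1`::

    sage: N = 13
    sage: D = DirichletGroup(N)
    sage: g = D(1)
    sage: g.jacobi_sum(g)
    11
    sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])
    11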
Return a random element of ``self``. The element is computed by multiplying a random power of each generator of ``self``::

    sage: DirichletGroup(60).random_element()  # random
    Dirichlet character modulo 60 of conductor 3 mapping 31 |--> 1, 41 |--> -1, 37 |--> 1

Dirichlet groups are built through a :class:`UniqueFactory`: ``create_key`` normalizes the input (ensuring, for instance, that either both ``zeta`` and ``zeta_order`` are specified or that both are ``None``), and ``create_object`` creates the object from the key (extra arguments are ignored). The latter is only called if the object was not found in the cache.
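Because the factory caches its results, identical keys yield the identical object, and pickling round-trips to the same instance (grounded in the module's own tests)::

    sage: G = DirichletGroup(9)
    sage: loads(dumps(G)) is G
    True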
The Kloosterman sum associated to `\chi` and the integers `a, b` is

.. MATH::

    K(a, b, \chi) = \sum_{r \in (\ZZ/m\ZZ)^*} \chi(r)\, \zeta^{ar + br^{-1}},

where `m` is the modulus of `\chi` and `\zeta` is a primitive `m`-th root of unity. This is a common generalization of Gauss sums, classical Kloosterman sums, Salié sums, etc.; it reduces to a Gauss sum when `b = 0`. ``kloosterman_sum`` performs an exact calculation and returns an element of a suitable cyclotomic field, while ``kloosterman_sum_numerical`` returns a complex number with ``prec`` *bits* of precision (default: 53) and is generally faster, but is currently only implemented when the base ring is `\QQ` or a cyclotomic field.

EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: e = G.1
    sage: e.kloosterman_sum(7,17)
    -2*zeta20^6 + 2*zeta20^4 + 4
    sage: e.kloosterman_sum_numerical(53,3,11)
    3.80422606518061 - 3.80422606518061*I
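For the odd quadratic character mod 3, the default arguments (`a=1`, `b=0`) give the Gauss sum `i\sqrt{3}`, so the imaginary part of the numerical value matches `\sqrt{3}`::

    sage: v = DirichletGroup(3).0.kloosterman_sum_numerical()
    sage: v.imag()
    1.73205080756888
    sage: sqrt(3.0)
    1.73205080756888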
AUTHORS:

- <NAME>

``ngens`` returns the number of generators of ``self``, and ``gens`` the generators themselves::

    sage: G = DirichletGroup(20)
    sage: G.ngens()
    2
    sage: G.gens()
    (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1,
     Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
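The group generators correspond to the minimal generators of `(\ZZ/N\ZZ)^*` returned by ``unit_gens``::

    sage: DirichletGroup(37).unit_gens()
    (2,)
    sage: DirichletGroup(20).unit_gens()
    (11, 17)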
FACTS: If the modulus is a prime `p` and the character is nontrivial, then the Gauss sum has absolute value `\sqrt{p}`.

``zeta`` returns the chosen root of unity in the base ring::

    sage: DirichletGroup(37).zeta()
    zeta36
    sage: DirichletGroup(20).zeta()
    zeta4
    sage: DirichletGroup(60, GF(25,'a')).zeta()
    2
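Correspondingly, ``zeta_order`` returns the order of that root of unity; for the default cyclotomic base ring this equals the exponent of `(\ZZ/N\ZZ)^*` (a sketch, with outputs that follow from the ``zeta`` values above)::

    sage: DirichletGroup(20).zeta_order()
    4
    sage: DirichletGroup(37).zeta_order()
    36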
Several methods exploit knowledge of the order of the character; this is potentially much more efficient than working from the definition. The conductor, for instance, is computed one prime at a time: for a prime power factor `p^k` of the modulus, the conductor at `p` is the smallest `p^r` such that the order of the corresponding component divides `\varphi(p^r)`, and for odd `p` the smallest `r` for which this divisibility holds equals `\mathrm{Valuation}(\mathrm{Order}(x), p) + 1`::

    cond = p**(valuation(self.order(), p) + 1)

Powers of 2 present some problems, as the multiplicative group modulo `2^k` is trivial for `k = 1` and non-cyclic for `k \geq 3`; an extra factor of 2 is included when `p = 2`, the modulus is divisible by 8, and the component of the character on the second generator mod `2^k` is nontrivial.
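A character is primitive exactly when its conductor equals its modulus; the generators mod 20 illustrate this (conductor values taken from the examples above)::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.conductor(), b.conductor(), (a*b).conductor()
    (4, 5, 20)
    sage: a.is_primitive()
    False
    sage: (a*b).is_primitive()
    True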
``change_ring(R)`` returns the group corresponding to ``self``, but with values in ``R``; here ``R`` must be a ring admitting a conversion map from the base ring of ``self``, or a ring homomorphism with the base ring of ``self`` as its domain::

    sage: e = DirichletGroup(7, QQ).0
    sage: f = e.change_ring(QuadraticField(3, 'a'))
    sage: f.parent()
    Group of Dirichlet characters modulo 7 with values in Number Field in a with defining polynomial x^2 - 3 with a = 1.732050807568878?

The class ``DirichletGroup_class`` is not to be called directly; use the factory function ``DirichletGroup``, which normalizes its arguments and caches the resulting groups.
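A sketch of the ring-homomorphism variant, using a complex embedding of `\QQ(i)` (since the printed value of `\psi(2)` depends on the embedding, only its absolute value is shown)::

    sage: K.<i> = CyclotomicField(4)
    sage: chi = DirichletGroup(5, K).0
    sage: phi = K.complex_embedding()
    sage: psi = chi.change_ring(phi)
    sage: psi.parent()
    Group of Dirichlet characters modulo 5 with values in Complex Field with 53 bits of precision
    sage: abs(psi(2))
    1.00000000000000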
\"\"\" Element = DirichletCharacter def __init__(self, base_ring,", "of precision - ``a`` -- integer, as for :meth:`.kloosterman_sum` -", "= self._zeta if zeta is None: R = self.base_ring() e", "to work properly, these caches have to be stored when", "in Cyclotomic Field of order 4 and degree 2' and", "((zeta6,), (-zeta6,), zeta6 - 3) ((zeta6,), (-zeta6 + 1,), 1)", "of self. These are always given by raising to a", "order 4 and degree 2' and 'Group of Dirichlet characters", "exact calculation and returns an element of a suitable cyclotomic", "|--> -1 sage: b.maximize_base_ring().base_ring() Cyclotomic Field of order 4 and", "\" % (self._zeta_order, self._zeta) s += str(self.base_ring()) return s @cached_method", "``k`` -- a non-negative integer - ``algorithm`` -- either ``'recurrence'``", "(:trac:`19056`):: sage: G = DirichletGroup(7, QQbar) sage: G[1].gauss_sum_numerical() -2.44013335834554 +", "0 while True: try: exponents[i] += 1 except IndexError: #", "modulo 5 of conductor 5 mapping 2 |--> zeta4] sage:", "@cached_method def multiplicative_order(self): \"\"\" The order of this character. EXAMPLES::", "to the map `\\ZZ/N\\ZZ \\to R` obtained by sending those", "modulus\") a = [] for u in self.unit_gens(): v =", "trivial_character(7) sage: [t(x) for x in [0..20]] [0, 1, 1,", "correctly (see :trac:`17338`):: sage: DirichletGroup(1)[0] Dirichlet character modulo 1 of", "character modulo 13 of conductor 13 mapping 2 |--> zeta12^3", "modulus. (Note that if the modulus is 2 mod 4,", "DirichletGroup(5, Zmod(15), zeta=2); G Group of Dirichlet characters modulo 5", "is not the largest order root of unity in the", "automatically, we can specify it using ``zeta_order``:: sage: DirichletGroup(7, CC,", "class DirichletCharacter(MultiplicativeGroupElement): \"\"\" A Dirichlet character. \"\"\" def __init__(self, parent,", "int(z.additive_order()) Auts = set([m % o for m in P._automorphisms()])", "2 |--> -zeta4 \"\"\" return ~self def bernoulli(self, k, algorithm='recurrence',", "order 4 and degree 2 We can't multiply directly, since", ":: sage: G = DirichletGroup(19, GF(5)) sage: loads(G.dumps()) == G", "Auts def galois_orbits(self, v=None, reps_only=False, sort=True, check=True): \"\"\" Return a", "return lcm([z.multiplicative_order() for z in self.values_on_gens()]) def primitive_character(self): \"\"\" Returns", "an integral domain\") k = self.order() if k <= 2:", "# instead of calls to the Bernoulli function. 
Likewise #", "= R.characteristic() if p == 0 or p.gcd(self._zeta_order) == 1:", "EXAMPLES:: sage: G.<a,b> = DirichletGroup(16) sage: latex(b) # indirect doctest", "False, True, False, True] sage: G = DirichletGroup(13) sage: e", "i for i, z in enumerate(self._zeta_powers)} def change_ring(self, R, zeta=None,", "e *= g**n return e def unit_gens(self): r\"\"\" Returns the", "\"\"\" return str(list(self.values_on_gens())) def _repr_(self): \"\"\" String representation of self.", "= DirichletGroup(20).gen(0) sage: e.values() [0, 1, 0, -1, 0, 0,", "z *= zeta g += phi(c)*z return g def jacobi_sum(self,", "mapping 3 |--> -1/2*b*a + 1/2 \"\"\" R = self.base_ring()", "11 |--> -1, 17 |--> 1, Dirichlet character modulo 20", "w.append(a) return w @property def _zeta_dlog(self): \"\"\" Return a dictionary", "- ``names`` -- ignored (needed so ``G.<...> = DirichletGroup(...)`` notation", "13 mapping 2 |--> zeta12 sage: loads(e.dumps()) == e True", "-1 Now let's take a look at a non-prime modulus::", "0: raise ValueError(\"conductor(=%s) must divide M(=%s)\"%(self.conductor(),M)) H = DirichletGroup(M, self.base_ring())", "= fundamental_discriminant(d) G = DirichletGroup(abs(D), rings.RationalField()) return G([kronecker(D,u) for u", "to be the cyclic subgroup of `R^*` generated by ``zeta``.", "unity is specified, base extension still works if the new", "if free_module_element.is_FreeModuleElement(x): x = parent._module(x) if any(u * v for", "zeta = P.zeta() zeta_argument = zeta.argument() v = [int(x.argument() /", "``base_ring``. This is the group of homomorphisms `(\\ZZ/N\\ZZ)^* \\to V`", "|--> 1 sage: G.gen(1) Dirichlet character modulo 20 of conductor", "same as self.order(). EXAMPLES:: sage: len(DirichletGroup(20)) 8 sage: len(DirichletGroup(20, QQ))", "be stored when pickling an instance of :class:`DirichletCharacter`. \"\"\" P", "sage: e = x[0]*x[1]; e Dirichlet character modulo 35 of", "to this Dirichlet character. The Gauss sum associated to `\\chi`", "of conductor 31 mapping 3 |--> -zeta30^7 + zeta30^5 +", "and that ``zeta_order`` is an element of ``ZZ``. TESTS:: sage:", "defined as .. MATH:: J(\\chi, \\psi) = \\sum_{a \\in \\ZZ", "*bits* of precision - ``a`` -- integer, as for :meth:`gauss_sum`.", "restrict the order of the characters, either with or without", "is not a domain (in which case the group of", "zeta_order self._modulus = modulus self._integers = rings.IntegerModRing(modulus) def __setstate__(self, state):", "conductor 5 mapping 11 |--> 1, 17 |--> -1 sage:", "integers modulo 15 sage: G.order() 4 sage: DirichletGroup(-33) Traceback (most", "R_values[value] # iterate: # increase the exponent vector by 1,", "eps is a character mod `p^n`, where `p` is a", "NotImplementedError('Kloosterman sums not implemented ' 'over this ring') n =", "modulo } 2 \\hbox{ of conductor } 1 \"\"\" s", "G = DirichletGroup(5, Zmod(15), zeta=2); G Group of Dirichlet characters", "value = val_on_gen.base_ring().zero() while True: # record character value on", "Z is quartic sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)]) -1", "(most recent call last): ... ValueError: modulus should be positive", "base ring is not exact or if the order of", "5 with values in Finite Field of size 5 ]", "characters modulo 17 with values in the group of order", "== 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1:", "character modulo 13 of conductor 13 mapping 2 |--> -zeta12^3", "modulus, zeta, zeta_order): \"\"\" Create a Dirichlet group. 
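Both algorithms agree on the example from the module's own tests::

    sage: eps = DirichletGroup(9).0
    sage: eps.bernoulli(3)
    10*zeta6 + 4
    sage: eps.bernoulli(3, algorithm="definition")
    10*zeta6 + 4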
Not to", "G.gens()[2].values_on_gens()[2] ; val zeta4 sage: parent(val) Gaussian Integers in Cyclotomic", "** a g = L(chi(0)) z = L.one() for c", "Traceback (most recent call last): ... TypeError: Galois orbits only", "of Dirichlet characters modulo 2 with values in Complex Field", "of the standard root of unity for ``parent``. In both", "string representation of self, often used in string representations of", "in string representations of modular forms EXAMPLES:: sage: chi =", "sage: e = G.0 sage: f = H.0 sage: e.gauss_sum_numerical()", "this character. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: c =", "If the base ring is not a domain (in which", "-2*zeta6 + 1 sage: norm(e.gauss_sum()) 3 :: sage: G =", "of conductor 13 mapping 2 |--> -zeta12] sage: e =", "-- integer, as for :meth:`.kloosterman_sum`. EXAMPLES:: sage: G = DirichletGroup(3)", "generalized Bernoulli number `B_{k,eps}`. INPUT: - ``k`` -- a non-negative", "+ zeta156^8 + zeta156^7 + zeta156^6 + zeta156^5 - zeta156^4", "r\"\"\" Return the Kloosterman sum associated to this Dirichlet character", "zeta to the new parent if zeta is not None:", "if v is None: v = self.list() else: if check:", "g_a(\\chi) = \\sum_{r \\in \\ZZ/m\\ZZ} \\chi(r)\\,\\zeta^{ar}, where `m` is the", "= G([z^2]) sage: chi.gauss_sum() zeta52^22 + zeta52^21 + zeta52^19 -", "is usually optimal. The ``definition`` algorithm uses the definition directly.", "= DirichletGroup(24)([1,-1,-1]); chi Dirichlet character modulo 24 of conductor 24", "generators of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.gens()", "if not (zeta is None and zeta_order is None): raise", "\"\"\" G = self.parent() if G.zeta.is_in_cache(): x = -self.element() else:", "in self.values()[1:]: z *= zeta g += phi(c)*z return g", "of Dirichlet characters. It was checking that their values were", "integer, as for :meth:`.kloosterman_sum` - ``b`` -- integer, as for", "= rings.Mod(c, m) z = zeta ** int(a*e + b*(e**(-1)))", "``ZZ``. TESTS:: sage: G = DirichletGroup(7, base_ring=Integers(9), zeta=2) # indirect", "finite, and hence this Dirichlet group is finite too. #", "17 |--> zeta4 sage: a*b # indirect doctest Dirichlet character", "for m in P._automorphisms()]) v = [P.element_class(P, m * z,", "only if `\\varepsilon(-1) = 1`. EXAMPLES:: sage: G = DirichletGroup(13)", "pointwise multiplication. The group `V` is determined as follows: -", "4 sage: DirichletGroup(-33) Traceback (most recent call last): ... ValueError:", "= DirichletGroup(3, QQ, zeta=-1, zeta_order=2)([-1]) sage: a * b #", "self._zeta is not None: # A root of unity was", "not is_Ring(base_ring): raise TypeError(\"base_ring (= %s) must be a ring\"", "G(0) Traceback (most recent call last): ... TypeError: cannot convert", "know that it stays the # same; otherwise it will", "implemented sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms() Traceback (most recent call last):", ".. 
MATH:: \\chi : (\\ZZ/N\\ZZ)^* \\to \\QQ(\\zeta_m) where `m` is", "e True :: sage: G, x = DirichletGroup(35).objgens() sage: e", "need not be the negation of is_odd, e.g., in characteristic", "G.<e> = DirichletGroup(13) sage: loads(G.dumps()) == G True :: sage:", "sage: a = kronecker_character(1) sage: b = DirichletGroup(2401,QQ)(a) # NOTE", "= DirichletGroup(N) sage: g = D(1) sage: g.jacobi_sum(g) 11 sage:", "only defined if base ring is an integral domain sage:", "modulo 5 of conductor 5 mapping 2 |--> -1, Dirichlet", "parent, x, check=True): r\"\"\" Create a Dirichlet character with specified", "-- integer, as for :meth:`.kloosterman_sum` - ``b`` -- integer, as", "DirichletGroup(17, Integers(15)) Group of Dirichlet characters modulo 17 with values", "characters of prime power modulus, where the prime powers exactly", "zeta4] Another example:: sage: G = DirichletGroup(13) sage: G.galois_orbits() [", "an integral domain if only zeta_order is specified sage: G", "= DirichletGroup.create_key(2, base_ring=CC); l (Complex Field with 53 bits of", "faster if False). - ``check`` - (optional, default: True) whether", "= tuple(~z for z in self.values_on_gens()) return G.element_class(G, x, check=False)", "= g(t)*e^{nt} h = [0] + [g * ((n*t).exp(prec)) for", "use the following, but this does not work # pari_orders", "|--> zeta4] Another example:: sage: G = DirichletGroup(13) sage: G.galois_orbits()", "character mod 20, but with values in `\\QQ(\\zeta_n)`:: sage: G", "sage: v=e.kloosterman_sum_numerical() sage: v.real() < 1.0e15 True sage: v.imag() 1.73205080756888", "modulus, zeta, zeta_order) def create_object(self, version, key, **extra_args): \"\"\" Create", "G = DirichletGroup(1) sage: chi = G.one() sage: chi.gauss_sum() 1", "sage: chi4 = DirichletGroup(4).gen() sage: chi4.conrey_number() 3 sage: chi =", "only implemented if `V` is cyclic and a generator for", "import sage.rings.all as rings import sage.rings.number_field.number_field as number_field from sage.libs.pari", "modulo 15 sage: G.order() 4 sage: DirichletGroup(-33) Traceback (most recent", "3) sage: DirichletCharacter(G, M([4, 8, 8])) Traceback (most recent call", "OUTPUT: pair (G, v) where G is `(\\ZZ / N", "is not a domain, an error will be raised. EXAMPLES::", "sage: (a^2).is_trivial() True \"\"\" if self.element.is_in_cache(): return not self.element() one", "note:: This function is currently only implemented when the base", "encounter it in a pickle element_key = '_DirichletCharacter__element' element =", "If ``zeta`` is specified, then `V` is taken to be", "4 and degree 2 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) 17 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) *", "Group of Dirichlet characters modulo 17 with values in Ring", "but this does not work # pari_orders = G.cyc() #", "5 of conductor 5 mapping 2 |--> -1, Dirichlet character", "object was not found in the cache. TESTS:: sage: K", "13 mapping 2 |--> zeta12^2 sage: e.galois_orbit() [Dirichlet character modulo", "ring is a cyclotomic field of order the exponent of", "... 
NotImplementedError: order of element not known sage: DirichletGroup(7, CC,", "phi = K.complex_embedding(prec) CC = phi.codomain() g = 0 m", "of Dirichlet characters modulo 11 with values in Cyclotomic Field", "Integers(9), zeta = Integers(9)(2)).0 sage: chi.galois_orbit() Traceback (most recent call", "zeta156^24 - zeta156^22 + zeta156^21 + zeta156^20 - zeta156^19 +", "the group of order 4 generated by 2 in Ring", "vi, oi in zip(v, pari_orders)] return (G, v) def conrey_number(self):", "`p` and the character is nontrivial, then the Gauss sum", "TESTS: The field of algebraic numbers is supported (:trac:`19056`):: sage:", "sage: G = DirichletGroup(13) sage: e = G.0 sage: e.gauss_sum()", "conductor 3 mapping 31 |--> 1, 41 |--> -1, 37", "to an element of %s\" % (x, self)) elif not", "!= 0: raise ArithmeticError(\"M(=%s) must be a multiple of the", "None if base_ring not specified\") e = rings.IntegerModRing(modulus).unit_group_exponent() base_ring =", "or p.gcd(self._zeta_order) == 1: zeta_order = self._zeta_order else: # No", "if zeta is None: R = self.base_ring() e = self._integers.unit_group_exponent()", "the n-th generator of self. EXAMPLES:: sage: G = DirichletGroup(20)", "chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0 sage: chi.galois_orbit() Traceback", "-1 sage: DirichletGroup(60, GF(25,'a')).zeta() 2 \"\"\" zeta = self._zeta if", "{}, respectively\" .format(x, parent.zeta_order(), orders)) self.element.set_cache(x) else: R = parent.base_ring()", "Field with 53 bits of precision sage: G == H", "- ``zeta`` -- (optional) root of unity in ``base_ring`` -", "0, 1, 36, 0, 1, 36] sage: e = DirichletGroup(21,", "+ 2) Let's check that trivial sums are being calculated", "- 3) ((zeta6,), (-zeta6 + 1,), 1) ((zeta6 - 1,),", "of ``self`` as its domain EXAMPLES:: sage: G = DirichletGroup(7,QQ);", "this character on each integer between 0 and the modulus.", "bits of precision, 2, None, None) sage: k == l", "G.0 sage: abs(e.gauss_sum_numerical()) 1.7320508075... sage: sqrt(3.0) 1.73205080756888 sage: e.gauss_sum_numerical(a=2) -...e-15", "not self.element() one = self.base_ring().one() return all(x == one for", "a.kernel() [1, 9, 13, 17] sage: b.kernel() [1, 11] \"\"\"", "G([-1]) Dirichlet character modulo 13 of conductor 13 mapping 2", "sage: e = DirichletGroup(13).0 sage: e.base_ring() Cyclotomic Field of order", "more examples - <NAME> (2006-05-21): added examples of everything; fix", "character. \"\"\" def __init__(self, parent, x, check=True): r\"\"\" Create a", "Dirichlet character modulo 6 of conductor 3 mapping 5 |-->", "where `m` is the modulus of `\\chi` and `\\zeta` is", "as follows: - If both ``zeta`` and ``zeta_order`` are omitted,", "are cached, creating two groups with the same parameters yields", "and :trac:`14368` are fixed:: sage: chi = DirichletGroup(1).list()[0] sage: chi.values()", "31 |--> zeta12^2 sage: e.order() 12 sage: loads(e.dumps()) == e", "self.modulus() == 1 or self.is_trivial(): return rings.Integer(1) F = factor(self.modulus())", "`x`. EXAMPLES:: sage: G = DirichletGroup(13) sage: K = G.base_ring()", "r = len(self.values_on_gens()) if r != 0: s += '", "or not the above divisibility holds # depends only on", "whether to sort the list of orbits and the orbits", "4 sage: G = DirichletGroup(11, RationalField()) sage: G.gen(0).base_ring() Rational Field", "def is_DirichletGroup(x): \"\"\" Returns True if x is a Dirichlet", "a multiple of the conductor of this character. 
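# --- Sketch of ``extend`` and ``restrict`` (values as in the doctests of this
# file): extending to a multiple of the modulus and then restricting back to a
# multiple of the conductor recovers the original character.
from sage.all import DirichletGroup

c = DirichletGroup(4).gen(0)           # the nontrivial character mod 4
chi = c.extend(20)                     # the same character, viewed modulo 20
print(chi.modulus(), chi.conductor())  # 20 4
print(chi.restrict(4) == c)            # True: 4 is a multiple of the conductor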
EXAMPLES:: sage:", "None: if not base_ring.is_integral_domain(): raise ValueError(\"base ring (= %s) must", "call last): ... NotImplementedError: Kloosterman sums not implemented over this", "Field of order 12 and degree 4 sage: G =", "and zeta_order is None): raise ValueError(\"zeta and zeta_order must be", "identity of power series (see for example [DI1995]_, Section 2.2):", "5 mapping 2 |--> -zeta4 AUTHORS: - <NAME> (2005-09-02): Fixed", "__element in the past # we need to set the", "any later version. # https://www.gnu.org/licenses/ # **************************************************************************** from __future__ import", "that if we encounter it in a pickle element_key =", "a PARI L-function or around the ``lcalc`` program. INPUT: -", "if element_key in state_dict: element = state_dict[element_key] del state_dict[element_key] super(DirichletCharacter,", "conversion between Dirichlet groups of different moduli, but no coercion.", "self - ``reps_only`` - (optional: default False) if True only", "Return the hash of ``self``. EXAMPLES:: sage: e = DirichletGroup(16)([-1,", "right hand side. # Since p-1 is coprime to p,", "- <NAME> (2006-08-06) \"\"\" d = rings.Integer(d) if d <=", "v def gauss_sum(self, a=1): r\"\"\" Return a Gauss sum associated", "sage: G.zeta_order() 4 In this example we create a Dirichlet", "try: exponents[i] += 1 except IndexError: # Done! return result_list", "generators of `(\\ZZ/N\\ZZ)^*` as returned by :meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`. - vector over", "self.parent()._zeta_powers return tuple([pows[i] for i in self.element()]) @cached_method(do_pickle=True) def element(self):", "else: # No root of unity specified; use the same", "import webbrowser lmfdb_url = 'https://www.lmfdb.org/Character/Dirichlet/{}/{}' url = lmfdb_url.format(self.modulus(), self.conrey_number()) webbrowser.open(url)", "= DirichletGroup(9) sage: loads(dumps(G)) is G True \"\"\" self._set_element_constructor() if", "IndexError: n(=2) must be between 0 and 1 :: sage:", "sage: e.level() 100 \"\"\" return self.modulus() @cached_method def multiplicative_order(self): \"\"\"", "= P._zeta_dlog v = M([dlog[x] for x in self.values_on_gens()]) v.set_immutable()", "True sage: is_DirichletCharacter([1]) False \"\"\" return isinstance(x, DirichletCharacter) class DirichletCharacter(MultiplicativeGroupElement):", "elements of self - ``reps_only`` - (optional: default False) if", "Dirichlet characters A :class:`DirichletCharacter` is the extension of a homomorphism", "20 mapping 11 |--> -1, 17 |--> -1], ..., [Dirichlet", "PolynomialRing(QQ) sage: K.<a> = NumberField(x^4 + 1) sage: DirichletGroup(5, K)", "of the Galois orbits of Dirichlet characters in self, or", "l); H Group of Dirichlet characters modulo 2 with values", "values of this character on each integer between 0 and", "kronecker_character_upside_down(d): \"\"\" Return the quadratic Dirichlet character (./d) of conductor", "... IndexError: n(=2) must be between 0 and 1 ::", "printed correctly (see :trac:`17338`):: sage: DirichletGroup(1)[0] Dirichlet character modulo 1", "cr=True, universe = cat.Objects()) def exponent(self): \"\"\" Return the exponent", "[1, 11, 121, 1331] sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms() Traceback (most", "last): ... 
NotImplementedError: Automorphisms for finite non-field base rings not", "{}) must have multiplicative orders dividing {}, respectively\" .format(x, orders))", "implemented when the base ring is a cyclotomic field, QQ,", "Dirichlet character modulo the divisor M of the modulus, which", "Z elif algorithm == 'lcalc': from sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character return", "contrast to the value `B_1 = -1/2` for the classical", "raise ValueError(\"values (= {}) must have multiplicative orders dividing {},", "the base ring is a cyclotomic field, QQ, QQbar, or", "0, 0, 0, 1, 0, -1] sage: e = DirichletGroup(20).gen(1)", "numerical value of e is near zero:: sage: v=e.kloosterman_sum_numerical() sage:", "-1` if and only if `p = 2` and the", "(e^2).minimize_base_ring().base_ring() Cyclotomic Field of order 6 and degree 2 sage:", "modulo 2 with values in Rational Field sage: H =", "(Note that if the modulus is 2 mod 4, there", "r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3 22 sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3) Residue field", "0]], [1, 0, 0; 0, 1, 0; 0, 0, 1]],", "lcm(g,n) if n == m: return self K = rings.CyclotomicField(m)", "This is only called if the object was not found", "also given, it must be the multiplicative order of ``zeta``;", "should be done with power series # instead of calls", "return self G = self.parent().change_ring(R) return G.element_class(G, [R(x) for x", "of conductor 13 mapping 2 |--> zeta12 sage: G(0) Traceback", "+= 1 except IndexError: # Done! return result_list value +=", "True TESTS:: sage: G = DirichletGroup(10) sage: TestSuite(G[1]).run() It is", "i in range(1, zeta_order): a = a * zeta a._set_multiplicative_order(zeta_order/gcd(zeta_order,", "mapping 11 |--> 1, 17 |--> -1 \"\"\" G =", "* 3) Residue field of Fractional ideal (-2*zeta4 + 5)", "Field of order 4 and degree 2 If the order", "self.base_ring() if R.is_prime_field(): return self p = R.characteristic() if p:", "= zeta_order self._modulus = modulus self._integers = rings.IntegerModRing(modulus) def __setstate__(self,", "... ValueError: modulus should be positive \"\"\" modulus = rings.Integer(N)", "but in contrast to the value `B_1 = -1/2` for", "\\mapsto ' + self.values_on_gens()[i]._latex_() return s def base_ring(self): \"\"\" Returns", "modulo 30 of conductor 5 mapping 11 |--> 1, 7", "Return the free module used to represent Dirichlet characters. TESTS::", "coercion map from %s to %s is defined\" % (self.base_ring(),", "1, zeta10^3 - zeta10^2 + zeta10 - 1, zeta10, zeta10^3", "in self.decomposition()]) p = F[0][0] # When p is odd,", "this character. EXAMPLES:: sage: e = DirichletGroup(100, QQ).0 sage: e.modulus()", "== eps True A related bug (see :trac:`18086`):: sage: K.<a,b>=NumberField([x^2", "sage: G.galois_orbits() [ [Dirichlet character modulo 13 of conductor 1", "sage: e Dirichlet character modulo 60 of conductor 60 mapping", "= self.base_ring() try: if x == R.one(): x = [R.one()]", "Dirichlet character modulo 20 of conductor 20 mapping 11 |-->", "DirichletGroup(100, QQ).0 sage: e.level() 100 \"\"\" return self.modulus() @cached_method def", "MATH:: \\sum_{a=1}^N \\frac{\\varepsilon(a) t e^{at}}{e^{Nt}-1} = sum_{k=0}^{\\infty} \\frac{B_{k,\\varepsilon}}{k!} t^k. ALGORITHM:", "over the # new base ring as well. 
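# --- Sketch of ``change_ring``: the modulus stays fixed while the ring of
# values is replaced (cf. the ``DirichletGroup(7, QQ)`` doctest above).
from sage.all import DirichletGroup, QQ, CyclotomicField

G = DirichletGroup(7, QQ)
H = G.change_ring(CyclotomicField(6))
chi = G.gen(0).change_ring(CyclotomicField(6))
print(H.modulus(), H.base_ring())             # 7, Cyclotomic Field of order 6 ...
print(chi.base_ring() == CyclotomicField(6))  # True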
zeta =", "self.parent() if is_ComplexField(P.base_ring()): zeta = P.zeta() zeta_argument = zeta.argument() v", "= self.parent() zo = G.zeta_order() m = G.modulus() g =", "= self.decomposition() val = self.base_ring()(1) for e in D: if", "4 sage: DirichletGroup(19).zeta_order() 18 \"\"\" order = self._zeta_order if order", "is_trivial(self): r\"\"\" Returns ``True`` if this is the trivial character,", "(-zeta6 + 1,), -2*zeta6 + 3) ((-zeta6 + 1,), (-zeta6", "\\hbox{ mapping } 15 \\mapsto 1,\\ 5 \\mapsto \\zeta_{4} TESTS:", "= 1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e =", "2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order() != 1: cond", "[] for u in self.unit_gens(): v = u.lift() # have", "list(self): \"\"\" Return a list of the Dirichlet characters in", "modulo 60 with values in the group of order 4", "old instances. TESTS:: sage: G = DirichletGroup(9) sage: loads(dumps(G)) is", "sage: H = DirichletGroup(16, QQ); H(DirichletGroup(16).1) Traceback (most recent call", "def zeta(self): \"\"\" Return the chosen root of unity in", "from sage.structure.element import MultiplicativeGroupElement from sage.structure.gens_py import multiplicative_iterator from sage.structure.parent", "-1/2`; see the discussion in [Coh2007]_, Section 9.4.1. EXAMPLES:: sage:", "implemented if the base ring has characteristic 0 or a", "e = G.0 sage: e.gauss_sum() -zeta156^46 + zeta156^45 + zeta156^42", "directly, but passed to the :func:`bernoulli` function if this is", ":meth:`.kloosterman_sum` - ``b`` -- integer, as for :meth:`.kloosterman_sum`. EXAMPLES:: sage:", "of precision sage: G == H False If ``base_ring`` was", "# indirect doctest sage: TestSuite(G).run() sage: G.base() # check that", "this character. This is a positive integer coprime to q", "there is a coercion map from `X`. There is conversion", "orbits themselves (slightly faster if False). - ``check`` - (optional,", "sage: G = DirichletGroup(13) sage: e = G.0 sage: e.is_odd()", "object from the key (extra arguments are ignored). This is", "x = self.element() + other.element() else: x = tuple(y *", "conductor 35 mapping 22 |--> zeta12^3, 31 |--> zeta12^2 -", "sage: G.<a,b> = DirichletGroup(20) sage: a.is_trivial() False sage: (a^2).is_trivial() True", "-1, 0, 1, 0, -zeta4, 0, 0, 0, zeta4, 0,", "(unless `m` equals -1) EXAMPLES:: sage: G = DirichletGroup(60) sage:", "None) An example to illustrate that ``base_ring`` is a part", "0.5 return self(-1) == R(-1) @cached_method def is_primitive(self): \"\"\" Return", "sage: e.restrict(4) Dirichlet character modulo 4 of conductor 4 mapping", "% base_ring) # If either zeta or zeta_order is given,", "of conductor 3 mapping 31 |--> 1, 41 |--> -1,", "function ``DirichletGroup``). The ``DirichletGroup`` factory ensures that either both ``zeta``", "character. EXAMPLES:: sage: e = DirichletGroup(100, QQ).0 sage: e.modulus() 100", "x, check=True): r\"\"\" Create a Dirichlet character with specified values", "* oi) // m for vi, oi in zip(v, pari_orders)]", "DirichletGroup(20) sage: e = G.1 sage: e.kloosterman_sum_numerical(53,3,11) 3.80422606518061 - 3.80422606518061*I", "unity in the field:: sage: g.zeta_order() 2 :: sage: r4", "well. zeta = self._zeta if zeta_order is None: # We", "j in range(k+1))) elif algorithm == \"definition\": # This is", "* self.element() else: x = tuple(z**n for z in self.values_on_gens())", "of unity. 
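# --- Sanity-check sketch for the statement just below: with b = 0 the
# Kloosterman sum K(a, 0, chi) collapses to the Gauss sum g_a(chi).
from sage.all import DirichletGroup

e = DirichletGroup(3)([-1])            # the quadratic character mod 3
print(e.gauss_sum())                   # 2*zeta6 - 1
print(e.kloosterman_sum(1, 0))         # the same element of QQ(zeta_6)
print(e.kloosterman_sum(1, 0) == e.gauss_sum())  # True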
This reduces to the Gauss sum if `b=0`.", "== l True sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC) False", "D(1) sage: g.jacobi_sum(g) 11 sage: sum([g(x)*g(1-x) for x in IntegerModRing(N)])", "if '_zeta_order' in state: state['_zeta_order'] = rings.Integer(state['_zeta_order']) super(DirichletGroup_class, self).__setstate__(state) @property", "be a multiple of the conductor of this character. EXAMPLES::", "11 |--> 1, 17 |--> 1 sage: b^2 Dirichlet character", "2: val *= -1 return val def __call__(self, m): \"\"\"", "modulo 5 with values in Ring of integers modulo 15", "= DirichletGroup(7, QQbar) sage: G[1].gauss_sum() -2.440133358345538? + 1.022618791871794?*I Check that", "character. EXAMPLES:: sage: G = DirichletGroup(11) sage: G.gen(0).base_ring() Cyclotomic Field", "sage: (DirichletGroup(72).0).decomposition() [Dirichlet character modulo 8 of conductor 4 mapping", "is_odd(self): r\"\"\" Return ``True`` if and only if `\\varepsilon(-1) =", "e*f Dirichlet character modulo 5 of conductor 5 mapping 2", "-1 sage: G([K.0]) Dirichlet character modulo 13 of conductor 13", "2*a TESTS: This shows that :trac:`6393` has been fixed:: sage:", "R.one()] result_list = [R.zero()] * mod gens = G.unit_gens() orders", "the characters in this group (default: the cyclotomic field `\\QQ(\\zeta_n)`,", "for e in range(1,n) if gcd(e,n) == 1] else: if", "= parent.base_ring() x = tuple(map(R, x)) if R.is_exact() and any(u**v", "def _latex_(self): r\"\"\" LaTeX representation of self. EXAMPLES:: sage: G.<a,b>", "characters modulo 20 with values in Cyclotomic Field of order", "character modulo 13 of conductor 13 mapping 2 |--> -1]", "directly, since coercion of one element into the other parent", "conductor 5 mapping 11 |--> 1, 17 |--> zeta4 We", "a \"factor\" of `(\\ZZ/2\\ZZ)^*`, which is the trivial group.) EXAMPLES::", "EXAMPLES:: sage: e = DirichletGroup(100, QQ).0 sage: e.modulus() 100 sage:", "EXAMPLES:: sage: G = DirichletGroup(60) sage: e = prod(G.gens(), G(1))", "sage: [e.is_odd() for e in G] [False, True, False, True,", "Field of order 4 and degree 2 sage: (e^12).minimize_base_ring().base_ring() Rational", "sage: DirichletGroup(60) is DirichletGroup(60) True \"\"\" def create_key(self, N, base_ring=None,", "f = DirichletGroup(5,CyclotomicField(4)).0 sage: e*f Dirichlet character modulo 5 of", "a large power of the image root of unity. We", "if both are specified - ``names`` -- ignored (needed so", "G = DirichletGroup(60) sage: e = prod(G.gens(), G(1)) sage: e", "self.parent() z = self.element() o = int(z.additive_order()) Auts = set([m", "to this Dirichlet character. This includes Gauss sums, classical Kloosterman", "element into the other parent fails in both cases:: sage:", "works) - ``integral`` -- boolean (default: ``False``); whether to replace", "= self._module zero = M(0) orders = self.integers_mod().unit_group().gens_orders() for i", "module used to represent Dirichlet characters. TESTS:: sage: DirichletGroup(12)._module Vector", "1 mapping 11 |--> 1, 17 |--> 1] ] sage:", "chi = DirichletGroup(7, K).0 sage: chi.minimize_base_ring() Dirichlet character modulo 7", "random_element(self): \"\"\" Return a random element of self. The element", "sage: loads(G.dumps()) == G True :: sage: G = DirichletGroup(19,", "# The value -1 above is the correct value of", "stored when pickling an instance of :class:`DirichletCharacter`. \"\"\" P =", "are only implemented if `V` is cyclic and a generator", "e = DirichletGroup(5).0 sage: e Dirichlet character modulo 5 of", "`\\chi` is .. 
MATH:: g_a(\\chi) = \\sum_{r \\in \\ZZ/m\\ZZ} \\chi(r)\\,\\zeta^{ar},", "1]) \"\"\" G = pari.znstar(self.modulus(), 1) pari_orders = G[1][1] pari_gens", "self.modulus() a.append(R(x(v))) return self.element_class(self, a) def _coerce_map_from_(self, X): \"\"\" Decide", "character modulo %s of conductor %s' % (self.modulus(), self.conductor()) r", "sage: G.list() [Dirichlet character modulo 5 of conductor 1 mapping", "N = 13 sage: D = DirichletGroup(N) sage: g =", "37) sage: DirichletGroup(20,QQ).unit_gens() (11, 17) \"\"\" return self._integers.unit_gens() @cached_method def", "17) \"\"\" return self._integers.unit_gens() @cached_method def zeta(self): \"\"\" Return the", "characters modulo 4 with values in Cyclotomic Field of order", "= DirichletGroup(35).objgens() sage: e = x[0]*x[1]; e Dirichlet character modulo", "= DirichletGroup(3) sage: e = G([-1]) sage: e.kloosterman_sum(3,5) -2*zeta6 +", "u, v in zip(x, orders)): raise ValueError(\"values (= {}) must", "sage: # The value -1 above is the correct value", "else: for i in range(1, zeta_order): a = a *", "3*zeta6 - 1) ((-zeta6,), (-zeta6 + 1,), -2*zeta6 + 3)", "r\"\"\" Return the Conrey number for this character. This is", "P.zeta() zeta_argument = zeta.argument() v = M([int(round(x.argument() / zeta_argument)) for", "ring\" % base_ring) # If either zeta or zeta_order is", "N if self.values.is_in_cache() or m != N - 1: return", "associated to this Dirichlet character as an approximate complex number", "= 13 sage: D = DirichletGroup(N) sage: g = D(1)", "examples. - <NAME> (2008-02-16): speed up __call__ method for Dirichlet", "z *= zeta g += L(c)*z return g def gauss_sum_numerical(self,", "QQ).0 sage: e.level() 100 \"\"\" return self.modulus() @cached_method def multiplicative_order(self):", "(X._zeta is not None and self.base_ring()(X._zeta) in self._zeta_powers))) def __len__(self):", "to cache DirichletGroups \"\"\" # **************************************************************************** # Copyright (C) 2004-2006", "def minimize_base_ring(self): r\"\"\" Return a Dirichlet character that equals this", ".. warning:: Please do not change the entries of the", "for :meth:`gauss_sum`. The Gauss sum associated to `\\chi` is ..", "next_prime(10^40) sage: g = DirichletGroup(19, GF(p)); g Group of Dirichlet", "self.values_on_gens.set_cache(x) else: if free_module_element.is_FreeModuleElement(x): self.element.set_cache(x) else: self.values_on_gens.set_cache(x) @cached_method def __eval_at_minus_one(self):", "is H True sage: G3 = DirichletGroup(31, CyclotomicField(3)) sage: G5", "G.base_extend(ZZ) Traceback (most recent call last): ... TypeError: no coercion", "the orbit of this character under the action of the", "\\hbox{ of conductor } 1 sage: latex(DirichletGroup(2)[0]) \\hbox{Dirichlet character modulo", "of modular forms EXAMPLES:: sage: chi = DirichletGroup(24).0 sage: chi._repr_short_()", "checking that they had the same level! - <NAME> (2006-01-07):", "sage: f = H.0 sage: e.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage:", "m) else: raise NotImplementedError(\"Gauss sums only currently implemented when the", "generators of `(Z/NZ)^*`:: sage: list(G) [Dirichlet character modulo 20 of", "zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 -", "the character in a browser. See https://www.lmfdb.org EXAMPLES:: sage: E", "but requires # no arith in a poly ring over", "to coerce zeta4 to a rational \"\"\" R = self.base_ring()", "comparison of Dirichlet characters. 
It was checking that their values", "DirichletGroup(420)([1,-1,-I,1]) sage: chi.conrey_number() 113 TESTS:: sage: eps1 = DirichletGroup(5)([-1]) sage:", ".. SEEALSO:: - :func:`sage.arith.misc.gauss_sum` for general finite fields - :func:`sage.rings.padics.misc.gauss_sum`", "self.modulus()) @cached_method def is_trivial(self): r\"\"\" Returns ``True`` if this is", "in G.unit_gens()]) def is_DirichletCharacter(x): r\"\"\" Return True if x is", "41 |--> -1, 37 |--> zeta4 sage: e(-1) -1 sage:", "2 mod 4, there will be a \"factor\" of `(\\ZZ/2\\ZZ)^*`,", "MultiplicativeGroupElement from sage.structure.gens_py import multiplicative_iterator from sage.structure.parent import Parent from", "mapping 2 |--> zeta12^2, Dirichlet character modulo 13 of conductor", "caching would be broken:: sage: k = k[1:]; k (2,", "``DirichletGroup``). The ``DirichletGroup`` factory ensures that either both ``zeta`` and", "either a ring admitting a *coercion* map from the base", "- ``R`` -- either a ring admitting a conversion map", "of conductor 5 mapping 2 |--> 4 sage: chi.multiplicative_order() 4", "Return the base extension of ``self`` to ``R``. INPUT: -", "character modulo } 1 \\hbox{ of conductor } 1 sage:", "= DirichletGroup(13) sage: e = DirichletGroup(13).0 sage: e.base_ring() Cyclotomic Field", "Dirichlet character (d/.) of minimal conductor. EXAMPLES:: sage: kronecker_character(97*389*997^2) Dirichlet", "sage: e = G([-1]) sage: e.gauss_sum(1) 2*zeta6 - 1 sage:", "to `0`. EXAMPLES:: sage: G = DirichletGroup(35) sage: x =", "order 4 and degree 2, 60, None, None) An example", "sage: chi.conrey_number() 113 TESTS:: sage: eps1 = DirichletGroup(5)([-1]) sage: eps2", "self.base_ring()) return H(self) def _pari_conversion(self): r\"\"\" Prepare data for the", "zeta12^3, 31 |--> zeta12^2 sage: e.order() 12 sage: loads(e.dumps()) ==", "Dirichlet character modulo 37733 of conductor 37733 mapping 1557 |-->", "10*zeta6 + 4 sage: eps.bernoulli(3, algorithm=\"definition\") 10*zeta6 + 4 TESTS:", "groups of prime power modulus corresponding to primes dividing modulus.", "binomial coefficients can be done much # more efficiently. v", "for z in self.values_on_gens()] if self.modulus() % 8 == 0:", "R = R.codomain() return DirichletGroup(self.modulus(), R, zeta=zeta, zeta_order=zeta_order) def base_extend(self,", "increase the exponent vector by 1, # increase n accordingly,", "last): ... NotImplementedError: factorization of polynomials over rings with composite", "cached_method from sage.misc.fast_methods import WithEqualityById from sage.structure.element import MultiplicativeGroupElement from", "sage: e.values_on_gens () (-1, 1) .. NOTE:: The constructor of", "= set([m % o for m in P._automorphisms()]) v =", "n: sum(v[r] * r**n for r in range(1, N)) ber", "\"\"\" Create a key that uniquely determines a Dirichlet group.", "orders of the respective generators of `(\\ZZ/N\\ZZ)^*`. OUTPUT: The Dirichlet", "CC = phi.codomain() g = 0 m = G.modulus() zeta", "of conductor 4 mapping 11 |--> -1, 17 |--> 1,", "is not None: self.values_on_gens.set_cache(values_on_gens) if element is not None: self.element.set_cache(element)", "factorization of polynomials over rings with composite characteristic is not", "or if the order of ``zeta`` is very large. -", "only if `\\varepsilon(-1) = -1`. EXAMPLES:: sage: G = DirichletGroup(13)", "__len__(self): \"\"\" Return the number of elements of this Dirichlet", "a domain, an error will be raised. EXAMPLES:: sage: DirichletGroup(20).galois_orbits()", "(or subring) of the base ring as possible. .. 
note::", "1 mapping 2 |--> 1] \"\"\" D = self.parent().decomposition() vals", "None: v = self.list() else: if check: v = [self(x)", "len(self.unit_gens()) except (TypeError, ValueError, ArithmeticError): pass if isinstance(x, list): #", "0: s += r',\\ ' s += self.parent().unit_gens()[i]._latex_() + r'", "is the same as len(self). EXAMPLES:: sage: DirichletGroup(20).order() 8 sage:", "unity. This reduces to the Gauss sum if `b=0`. This", "be a ring\" % base_ring) # If either zeta or", "e.kloosterman_sum(5,11) Traceback (most recent call last): ... NotImplementedError: Kloosterman sums", "modulo 20 of conductor 1 mapping 11 |--> 1, 17", "-*- coding: utf-8 -*- r\"\"\" Dirichlet characters A :class:`DirichletCharacter` is", "K) Group of Dirichlet characters modulo 5 with values in", "base ring has characteristic 0 or a prime. EXAMPLES:: sage:", "DirichletGroup(13, QQ) False \"\"\" from sage.categories.groups import Groups category =", "or base_ring.is_finite(): # The group of n-th roots of unity", "def galois_orbits(self, v=None, reps_only=False, sort=True, check=True): \"\"\" Return a list", "= R.characteristic() if p: K = rings.IntegerModRing(p) elif self.order() <=", "R, zeta=zeta, zeta_order=zeta_order) def base_extend(self, R): \"\"\" Return the base", "y, z in zip(self.values_on_gens(), other.values_on_gens())) return G.element_class(G, x, check=False) def", "- ``a`` -- integer, as for :meth:`gauss_sum`. The Gauss sum", "sage: e = D.0 sage: f = D[-2] sage: e.jacobi_sum(f)", "+ zeta10 - 1, zeta10, zeta10^3 - zeta10^2 + zeta10", "of `(\\ZZ/N\\ZZ)^*`) - ``zeta`` -- (optional) root of unity in", "= self.zeta_order() M = self._module zero = M(0) orders =", "sage: DirichletGroup(20).unit_gens() (11, 17) sage: DirichletGroup(60).unit_gens() (31, 41, 37) sage:", "of self as a product of Dirichlet characters of prime", "] sage: e = G.0 sage: e Dirichlet character modulo", "- zeta12, Dirichlet character modulo 13 of conductor 13 mapping", "= category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring, category=category) self._zeta = zeta self._zeta_order =", "self.values_on_gens()[i]._latex_() return s def base_ring(self): \"\"\" Returns the base ring", "G = DirichletGroup(5, Zmod(15)); G Group of Dirichlet characters modulo", "L-function associated to %s' % self) return Z elif algorithm", "e = prod(G.gens(), G(1)) sage: e Dirichlet character modulo 60", "webbrowser.open(url) def galois_orbit(self, sort=True): r\"\"\" Return the orbit of this", "is quartic sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)]) -1 sage:", "DirichletGroup(60, r4) sage: G.gens() (Dirichlet character modulo 60 of conductor", "Number Field in a with defining polynomial x^2 - 3", "Bernoulli function. Likewise # computing all binomial coefficients can be", "and the exponent of `(\\ZZ/N\\ZZ)^*`. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20,QQ)", "The automorphisms in characteristic p are # k-th powering for", "NotImplementedError(\"Automorphisms for finite non-field base rings not implemented\") # The", "Returns True if x is a Dirichlet group. 
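# --- Quick sanity check of the two type predicates defined in this module
# (the usual import path ``sage.modular.dirichlet`` is assumed); the doctests
# follow below.
from sage.all import DirichletGroup
from sage.modular.dirichlet import is_DirichletGroup, is_DirichletCharacter

G = DirichletGroup(11)
print(is_DirichletGroup(G), is_DirichletGroup(11))                   # True False
print(is_DirichletCharacter(G.gen(0)), is_DirichletCharacter([1]))   # True False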
EXAMPLES:: sage:", "which case the group of roots of unity is not", "a list of the values of this character on each", "|--> zeta4 sage: a*b # indirect doctest Dirichlet character modulo", "a ring homomorphism with the base ring of ``self`` as", "i in range(r): if i != 0: s += r',\\", "character modulo } %s \\hbox{ of conductor } %s' %", "``zeta_order`` is, then `V` is taken to be the group", "else: x = tuple(z**n for z in self.values_on_gens()) return G.element_class(G,", "k <= 2: return [self] P = self.parent() z =", "= DirichletCharacter def __init__(self, base_ring, modulus, zeta, zeta_order): \"\"\" Create", "`\\ZZ/N\\ZZ` where `N` is the modulus of self. EXAMPLES:: sage:", "# distinguished set of generators. category = category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring,", "K.<z> = CyclotomicField(8) sage: G = DirichletGroup(13, K) sage: chi", "Jacobi sum J(Y, Z). sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y) -1 -1 \"\"\"", "MATH:: \\chi : (\\ZZ/N\\ZZ)^* \\to \\QQ(\\zeta_m) where `m` is the", "DirichletGroup(20, CC) sage: a.is_primitive() False sage: b.is_primitive() False sage: (a*b).is_primitive()", "generators. category = category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring, category=category) self._zeta = zeta", "SEEALSO:: - :func:`sage.arith.misc.gauss_sum` for general finite fields - :func:`sage.rings.padics.misc.gauss_sum` for", "sums only currently implemented when the base ring is a", "= lcm(g,n) if n == m: return self K =", "Dirichlet characters modulo 19 with values in Finite Field of", "if only ``zeta_order`` is specified:: sage: DirichletGroup(17, Integers(15)) Group of", "return G.element_class(G, [R(x) for x in self.values_on_gens()]) def _richcmp_(self, other,", "powers exactly divide the modulus of this character. EXAMPLES:: sage:", "sage: DirichletGroup(5, K, zeta_order=2) Group of Dirichlet characters modulo 5", "from `X`. There is conversion between Dirichlet groups of different", "Dirichlet characters in self, or in v if v is", "GF(25,'a')).zeta() 2 \"\"\" zeta = self._zeta if zeta is None:", "1]) sage: loads(dumps(e)) == e True \"\"\" # values_on_gens() used", "b.is_primitive() False sage: (a*b).is_primitive() True sage: G.<a,b> = DirichletGroup(20, CC)", "if values_on_gens_key in state_dict: values_on_gens = state_dict[values_on_gens_key] del state_dict[values_on_gens_key] #", "G = DirichletGroup(60, r4) sage: G.gens() (Dirichlet character modulo 60", "k in self.__bernoulli: return self.__bernoulli[k] N = self.modulus() K =", "last): ... IndexError: n(=-1) must be between 0 and 1", "0, 1, 0; 0, 0, 1]], [0, 1, 1]) \"\"\"", "r4 = CyclotomicField(4).ring_of_integers() sage: G = DirichletGroup(60, r4) sage: G.gens()", "the Free Software Foundation, either version 2 of the License,", "x^4 + 1 sage: DirichletGroup(5, K, zeta_order=2) Group of Dirichlet", "change_ring(self, R, zeta=None, zeta_order=None): \"\"\" Return the base extension of", "3 22 sage: parent(r4.residue_field(r4.ideal(29).factor()[0][0])(G.gens()[2].values_on_gens()[2]) * 3) Residue field of Fractional", "`V` is taken to be the cyclic subgroup of `R^*`", "(-1,), -2*zeta6 - 1) ((zeta6,), (-zeta6,), zeta6 - 3) ((zeta6,),", "(= {} modulo {}) must have additive orders dividing {},", "divide the modulus of this character. 
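# --- Sketch of ``decomposition``: a character mod 72 factors into characters
# mod 8 and mod 9, and the conductor is the product of the local conductors,
# exactly as the ``conductor`` code above computes it.
from sage.all import DirichletGroup, prod

chi = DirichletGroup(72).gen(0)
parts = chi.decomposition()
print([e.modulus() for e in parts])                           # [8, 9]
print(prod(e.conductor() for e in parts) == chi.conductor())  # True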
EXAMPLES:: sage: G.<a,b> =", "of Dirichlet characters modulo `N` with values in a ring", "rational \"\"\" R = self.base_ring() try: if x == R.one():", "+= r',\\ ' s += self.parent().unit_gens()[i]._latex_() + r' \\mapsto '", "+ zeta10 - 1, zeta10^2] TESTS: Test that :trac:`11783` and", "characters modulo 5 with values in Number Field in a", "= self._zeta_order else: # No root of unity specified; use", "zeta = L.zeta(m) elif number_field.is_CyclotomicField(K) or is_RationalField(K): chi = chi.minimize_base_ring()", "DirichletGroup(20).zeta() zeta4 sage: DirichletGroup(60).zeta() zeta4 sage: DirichletGroup(60,QQ).zeta() -1 sage: DirichletGroup(60,", "sage: kronecker_character(97*389*997^2) Dirichlet character modulo 37733 of conductor 37733 mapping", "not implemented \"\"\" n = self.zeta_order() R = self.base_ring() p", "(optional: default True) whether to sort the list of orbits", "self.__eval_at_minus_one() def change_ring(self, R): \"\"\" Return the base extension of", "UniqueFactory to cache DirichletGroups \"\"\" # **************************************************************************** # Copyright (C)", "not checking that they had the same level! - <NAME>", "1, Dirichlet character modulo 20 of conductor 5 mapping 11", "Field of order 4 and degree 2 sage: r4.residue_field(r4.ideal(29).factor()[0][0])(val) 17", "we construct the group of Dirichlet character mod 20, but", "|--> zeta4 sage: e(-1) -1 sage: e(2) 0 sage: e(7)", "2 |--> 1 \"\"\" G = self.parent() if G.zeta.is_in_cache(): x", "illustrates a canonical coercion:: sage: e = DirichletGroup(5, QQ).0 sage:", "EXAMPLES:: sage: G = DirichletGroup(11) sage: G.gen(0).base_ring() Cyclotomic Field of", "self.modulus() % 8 == 0: # 2 factors at 2.", "those `x\\in\\ZZ/N\\ZZ` with `\\gcd(N,x)>1` to `0`. EXAMPLES:: sage: G =", "def is_even(self): r\"\"\" Return ``True`` if and only if `\\varepsilon(-1)", "to the Bernoulli function. Likewise # computing all binomial coefficients", "-2*zeta6 + 3) ((-zeta6 + 1,), (-zeta6 + 1,), zeta6", "sage: t = trivial_character(7) sage: [t(x) for x in [0..20]]", "1, 0, 1, 0, 0, 0, 1, 0, 1] sage:", "so the return value is a list of integers. At", "check=False) def _mul_(self, other): \"\"\" Return the product of self", "sage: e.jacobi_sum(f) 3*zeta12^2 + 2*zeta12 - 3 sage: f.jacobi_sum(e) 3*zeta12^2", "conductor } 1 \"\"\" s = r'\\hbox{Dirichlet character modulo }", "Furthermore, a generator ``zeta`` of `V` is computed, and an", "# list of values on each unit generator return self.element_class(self,", "free module used to represent Dirichlet characters. TESTS:: sage: DirichletGroup(12)._module", "EXAMPLES:: sage: e = DirichletGroup(5).0 sage: e Dirichlet character modulo", "root of unity has small order, i.e., it is not", "ring; the value ring for the characters in this group", "K(bernoulli(k)) elif self(-1) != K((-1)**k): ber = K.zero() elif algorithm", "0: # 2 factors at 2. vals[0].append(vals[1][0]) del vals[1] elif", "it could # be sped up by a factor of", "3 |--> -1, Dirichlet character modulo 9 of conductor 1", "[(DP[i].values_on_gens(),DP[j].values_on_gens(),DP[i].jacobi_sum(DP[j])) ....: for i in range(p-1) for j in range(i,", "self.base_ring().has_coerce_map_from(X.base_ring()) and (self._zeta is None or (X._zeta is not None", ":: sage: e = DirichletGroup(13).0 sage: e.change_ring(QQ) Traceback (most recent", "while True: # record character value on n result_list[n] =", "[ Group of Dirichlet characters modulo 4 with values in", "Dirichlet group. 
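# --- Because ``DirichletGroup`` is a UniqueFactory, identical parameters
# return the identical cached object, while a different base ring gives a
# different key and hence a different object.
from sage.all import DirichletGroup, QQ

print(DirichletGroup(60) is DirichletGroup(60))      # True (cached)
print(DirichletGroup(60, QQ) is DirichletGroup(60))  # False (different key)
print(DirichletGroup.create_key(60))                 # the normalized key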
TESTS:: sage: DirichletGroup.create_key(60) (Cyclotomic Field of order 4", "modulo 5 with values in Number Field in a with", "order 10 and degree 4' sage: G.rename('Dir(11)') sage: G Dir(11)", "(-1,), 1) ((-1,), (-zeta6,), -2*zeta6 + 3) ((-1,), (-zeta6 +", "import (binomial, bernoulli, kronecker, factor, gcd, lcm, fundamental_discriminant, euler_phi, factorial,", "if G.zeta.is_in_cache(): x = -self.element() else: x = tuple(~z for", "degree 2, Group of Dirichlet characters modulo 5 with values", "= self.zeta_order() R = self.base_ring() p = R.characteristic() if p", "bernoulli(self, k, algorithm='recurrence', cache=True, **opts): r\"\"\" Returns the generalized Bernoulli", "ring is an integral domain\") k = self.order() if k", "and 2 are printed correctly (see :trac:`17338`):: sage: DirichletGroup(1)[0] Dirichlet", "for x in IntegerModRing(5)]) -1 sage: # The value -1", "sage: e.values() [0, 1, 0, -1, 0, 0, 0, -1,", "= u.lift() # have to do this, since e.g., unit", "``base_ring`` -- commutative ring; the value ring for the characters", "sage: e = DirichletGroup(20).gen(0) sage: e.values() [0, 1, 0, -1,", "13 mapping 2 |--> -zeta12^3 + zeta12, Dirichlet character modulo", "component of the numerical value of e is near zero::", "the numerical value of e is near zero:: sage: v=e.kloosterman_sum_numerical()", "== a True \"\"\" if M % self.modulus() != 0:", "conductor 37 mapping 2 |--> zeta36^4 sage: DirichletGroup(20).random_element() Dirichlet character", "be sped up by a factor of 10 or more", "[3]], [[2]~, Vecsmall([2])], [[4], [[1, matrix(0,2)]], Mat(1), [3], [2], [0]],", "zeta156^15 - 2*zeta156^14 - zeta156^10 + zeta156^8 + zeta156^7 +", "zeta4 sage: e.bar() Dirichlet character modulo 5 of conductor 5", "from that if we encounter it in a pickle element_key", "generators for the units of `(\\ZZ/N\\ZZ)^*`, where `N` is the", "G == H False If ``base_ring`` was not be a", "a Dirichlet character modulo the multiple M of the modulus.", "Return a tuple of the values of ``self`` on the", "abs(self(-1) - R(-1)) < 0.5 return self(-1) == R(-1) @cached_method", "in reversed(e.divisors()): try: zeta = R.zeta(d) break except ValueError: pass", "r != 0: s += r' \\hbox{ mapping } '", "character (d/.) of minimal conductor. EXAMPLES:: sage: kronecker_character(97*389*997^2) Dirichlet character", "indirect doctest Dirichlet character modulo 3 of conductor 3 mapping", "P._zeta_dlog v = [dlog[x] for x in values_on_gens] m =", "- 1,), 2*zeta6 + 1) ((zeta6,), (-1,), -2*zeta6 - 1)", "and degree 2' and 'Group of Dirichlet characters modulo 5", "return s def _latex_(self): r\"\"\" LaTeX representation of self. EXAMPLES::", "= \\sum_{r \\in (\\ZZ/m\\ZZ)^\\times} \\chi(r)\\,\\zeta^{ar+br^{-1}}, where `m` is the modulus", "sage: e = G.0 sage: e.is_even() False sage: e(-1) -1", "group `R^*` of ``base_ring``. This is the group of homomorphisms", "3 sage: chi = DirichletGroup(24)([1,-1,-1]); chi Dirichlet character modulo 24", "a non-negative integer - ``algorithm`` -- either ``'recurrence'`` (default) or", "character modulo 6 of conductor 3 mapping 5 |--> -1", "has small order, i.e., it is not the largest order", "sum exactly (which is generally slower). 
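# --- Exact versus numerical Kloosterman sums (values as in the doctests):
# the numerical method trades exactness in a cyclotomic field for speed.
from sage.all import DirichletGroup

e = DirichletGroup(3)([-1])
print(e.kloosterman_sum(3, 5))                # -2*zeta6 + 1, exact
print(e.kloosterman_sum_numerical(a=3, b=5))  # the same value, as a complex approximation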
INPUT: - ``prec`` --", "= DirichletGroup.create_object(None, k); G Group of Dirichlet characters modulo 2", "of conductor 13 mapping 2 |--> zeta12^2, Dirichlet character modulo", "\"\"\" g = rings.IntegerModRing(self.modulus()).unit_group_exponent() if g == 1: g =", "could # be sped up by a factor of 10", "= ~e sage: f*e Dirichlet character modulo 13 of conductor", "+ 1 sage: norm(e.gauss_sum()) 3 :: sage: G = DirichletGroup(13)", "None: zeta = base_ring(zeta) if zeta_order is None: zeta_order =", "factor of 10 or more in many cases, # especially", "g = [] ord = self.zeta_order() M = self._module zero", "r = rings.IntegerModRing(n)(p).multiplicative_order() Auts = [p**m for m in range(0,r)]", "5 of conductor 1 mapping 2 |--> 1, Dirichlet character", "quartic sage: sum([Y(x)*Z(1-x) for x in IntegerModRing(5)]) -1 sage: #", "1,), zeta6 + 2) Let's check that trivial sums are", "algorithm == \"recurrence\": # The following code is pretty fast,", "modulo 20 of conductor 20 mapping 11 |--> -1, 17", "Group of Dirichlet characters modulo 5 with values in Number", "in state_dict: element = state_dict[element_key] del state_dict[element_key] super(DirichletCharacter, self).__setstate__(state) if", "sage: e = DirichletGroup(100).0 sage: e.multiplicative_order() 2 \"\"\" if self.parent().zeta.is_in_cache():", "in m.coprime_integers(m): e = rings.Mod(c, m) z = zeta **", "conductor 1 mapping 2 |--> 1], ..., [Dirichlet character modulo", "type(G(1).conductor()) <type 'sage.rings.integer.Integer'> \"\"\" if self.modulus() == 1 or self.is_trivial():", "1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = G.0", "is_ComplexField(P.base_ring()): zeta = P.zeta() zeta_argument = zeta.argument() v = [int(x.argument()", "modulo 5 of conductor 5 mapping 2 |--> -a^2] We", "order the exponent of `(\\ZZ/N\\ZZ)^*`:: sage: DirichletGroup(20) Group of Dirichlet", "degree 2 \"\"\" N = self.modulus() m = m %", "= rings.IntegerModRing(modulus).unit_group_exponent() base_ring = rings.CyclotomicField(e) if integral: base_ring = base_ring.ring_of_integers()", "5 with values in Number Field in a with defining", "with values in Rational Field sage: G.order() 4 sage: G.base_ring()", "is a positive integer coprime to q that identifies a", "'pari' (default) or 'lcalc' EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage:", "on generators of `(\\ZZ/n\\ZZ)^*`. INPUT: - ``parent`` -- :class:`DirichletGroup`, a", "k == l False sage: G = DirichletGroup.create_object(None, k); G", "mapping 13533432536 |--> -1, 22369178537 |--> -1, 14266017175 |--> 1", "(./d) of conductor d, for d0. EXAMPLES:: sage: kronecker_character_upside_down(97*389*997^2) Dirichlet", "G = DirichletGroup(100000, CC) sage: G.1.is_even() True Note that ``is_even``", "the decomposition of self as a product of Dirichlet characters", "mapping 7 |--> -1, 5 |--> 1, Dirichlet character modulo", "AUTHORS: - <NAME> (2006-08-06) \"\"\" d = rings.Integer(d) if d", "# g(t) = t/(e^{Nt}-1) g = t/((N*t).exp(prec) - 1) #", "# we need to set the cache of values_on_gens() from", "+ 1) if p == 2 and F[0][1] > 2", "TypeError: Galois orbits only defined if base ring is an", "the case where `R` is a map (:trac:`18072`):: sage: K.<i>", "> 1: return prod([d.conductor() for d in self.decomposition()]) p =", "with values in Cyclotomic Field of order 4 and degree", "2 |--> -1 sage: G([K.0]) Dirichlet character modulo 13 of", "oi in zip(v, pari_orders)] return (G, v) def conrey_number(self): r\"\"\"", "sage: Y.jacobi_sum(Z) -1 sage: Z.jacobi_sum(Y) -1 Now let's take a", "the integers a,b is .. 
MATH:: K(a,b,\\chi) = \\sum_{r \\in", "G.append(x) else: G.append(orbit) for z in orbit: seen_so_far.add(tuple(z.element())) G =", "a g = phi(self(0)) z = CC.one() for c in", "see also :meth:`.kloosterman_sum_numerical`, which gives an inexact answer (but is", "and only if `p = 2` and the factor of", "chi Dirichlet character modulo 5 of conductor 5 mapping 2", "characters modulo 60 with values in Gaussian Integers in Cyclotomic", "G([1 for u in G.unit_gens()]) sage: e.kloosterman_sum(7,17) -2*zeta20^6 + 2*zeta20^4", "and/or modify # it under the terms of the GNU", "characters modulo 1 and 2 are printed correctly (see :trac:`17338`)::", "Group of Dirichlet characters modulo 5 with values in Finite", "-1, 14266017175 |--> 1 AUTHORS: - <NAME> (2006-08-06) \"\"\" d", "root of unity for ``parent``. In both cases, the orders", "|--> 1, 41 |--> -1, 37 |--> 1, Dirichlet character", "ValueError(\"values (= {} modulo {}) must have additive orders dividing", "= DirichletGroup(3) sage: e = G([-1]) sage: e.gauss_sum(1) 2*zeta6 -", "- <NAME> (2006-05-21): added examples of everything; fix a *lot*", "even if they define identical functions on ``ZZ``. EXAMPLES:: sage:", "elif not x.conductor().divides(self.modulus()): raise TypeError(\"conductor must divide modulus\") a =", "self.values_on_gens()] if self.modulus() % 8 == 0: # 2 factors", "-1 directly using dlog and a large power of the", "sage: f.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage: abs(e.gauss_sum_numerical()) 3.60555127546... sage: abs(f.gauss_sum_numerical())", "last): ... NotImplementedError: Kloosterman sums not implemented over this ring", "2, 2], [0, 0, 0]], [1, 0, 0; 0, 1,", "orbit of this character under the action of the absolute", "None) sage: k == l False sage: G = DirichletGroup.create_object(None,", "def __init__(self, base_ring, modulus, zeta, zeta_order): \"\"\" Create a Dirichlet", "group. Not to be called directly (use the factory function", "defining polynomial x^4 + 1 An example where we give", "G.integers_mod().one() value = val_on_gen.base_ring().zero() while True: # record character value", "G([i, -1, -1]) Traceback (most recent call last): ... ValueError:", "b=0): r\"\"\" Return the Kloosterman sum associated to this Dirichlet", "is an element of ``base_ring`` and that ``zeta_order`` is an", "True, False, True] sage: G = DirichletGroup(100000, CC) sage: G.0.is_odd()", "with pointwise multiplication. The group `V` is determined as follows:", "is raised if such ``zeta`` cannot be found. EXAMPLES: The", "sage: DirichletGroup(37).unit_gens() (2,) sage: DirichletGroup(20).unit_gens() (11, 17) sage: DirichletGroup(60).unit_gens() (31,", "4 is nontrivial or `p > 2` and 2 does", "'[-1, 1, 1]' \"\"\" return str(list(self.values_on_gens())) def _repr_(self): \"\"\" String", "vector is mutable *only* because immutable vectors are not implemented", "# **************************************************************************** from __future__ import print_function import sage.categories.all as cat", "it under the terms of the GNU General Public License", "which calculates the sum exactly (which is generally slower). INPUT:", "shows that :trac:`6393` has been fixed:: sage: G = DirichletGroup(5);", "we create a Dirichlet group with values in a number", "= G.modulus() g = 0 L = rings.CyclotomicField(m.lcm(zo)) zeta =", "order root of unity in the field:: sage: g.zeta_order() 2", "5 with values in the group of order 4 generated", "complex conjugate of this Dirichlet character. 
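# --- Sketch: for a character of finite order, ``bar`` (complex conjugation)
# agrees with inversion.
from sage.all import DirichletGroup

e = DirichletGroup(5).gen(0)   # maps 2 |--> zeta4
print(e.bar())                 # maps 2 |--> -zeta4 = zeta4^(-1)
print(e.bar() == ~e)           # True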
EXAMPLES:: sage: e =", "e Dirichlet character modulo 5 of conductor 5 mapping 2", "from sage.misc.functional import round from sage.misc.cachefunc import cached_method from sage.misc.fast_methods", "order ``zeta_order``. Furthermore, a generator ``zeta`` of `V` is computed,", "If ``base_ring`` was not be a part of the key,", "modulus = rings.Integer(N) if modulus <= 0: raise ValueError('modulus should", "call last): ... ValueError: conductor(=4) must divide M(=50) \"\"\" M", "if self.values.is_in_cache() or m != N - 1: return self.values()[m]", "where p^r = 1 (mod n), so r is the", "%s\" % (x, self)) elif not x.conductor().divides(self.modulus()): raise TypeError(\"conductor must", "\\mapsto \\zeta_{4} TESTS: Dirichlet characters modulo 1 and 2 are", "sage: G.base() # check that Parent.__init__ has been called Ring", "`2^k` is trivial for `k = 1` and non-cyclic for", "import prod import sage.misc.prandom as random import sage.modules.free_module as free_module", "== R(-1) @cached_method def is_primitive(self): \"\"\" Return ``True`` if and", "- zeta156^34 - zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28", "values (= (zeta16^4, -1, -1)) must have multiplicative orders dividing", "1, 0, -1, 0, 0, 0, -1, 0, 1, 0,", "+ zeta156^41 + 2*zeta156^40 + zeta156^37 - zeta156^36 - zeta156^34", "of generators. category = category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring, category=category) self._zeta =", "Group of Dirichlet characters modulo 60 with values in the", "P._zeta_dlog v = M([dlog[x] for x in self.values_on_gens()]) v.set_immutable() return", "base_ring.ring_of_integers() if not is_Ring(base_ring): raise TypeError(\"base_ring (= %s) must be", "integral domain \"\"\" if not self.base_ring().is_integral_domain(): raise TypeError(\"Galois orbits only", "by # the Free Software Foundation, either version 2 of", "e = DirichletGroup(100).1 sage: e.order() # same as multiplicative_order, since", "sage: e = G.0^2; e Dirichlet character modulo 13 of", "+ zeta30^4 + zeta30^3 - zeta30 - 1 When a", "# k = 1, p, p^2, ..., p^(r-1), # where", "class DirichletGroupFactory(UniqueFactory): r\"\"\" Construct a group of Dirichlet characters modulo", "not be a part of the key, the keys would", "sage: G = DirichletGroup(20) sage: G.gens() (Dirichlet character modulo 20", "g = f.base_extend(Integers(15)) sage: g(3) 14 sage: g.parent().zeta() 14 \"\"\"", "sage: DirichletGroup(20).zeta_order() 4 sage: DirichletGroup(60).zeta_order() 4 sage: DirichletGroup(60, GF(25,'a')).zeta_order() 4", "def __init__(self, parent, x, check=True): r\"\"\" Create a Dirichlet character", "Finite Field of size 5, Group of Dirichlet characters modulo", "for unpickling old instances. TESTS:: sage: G = DirichletGroup(9) sage:", "= sum([self(a)*h[a][k] for a in range(1,N+1)]) * factorial(k) else: raise", "60, K.gen(), 4)) Group of Dirichlet characters modulo 60 with", "+= L(c)*z return g def gauss_sum_numerical(self, prec=53, a=1): r\"\"\" Return", "sage.libs.lcalc.lcalc_Lfunction import Lfunction_from_character return Lfunction_from_character(self) raise ValueError('algorithm must be \"pari\"", "this is usually optimal. The ``definition`` algorithm uses the definition", "character at the integer `m`. .. 
warning:: A table of", "mapping 2 |--> zeta12 sage: G(0) Traceback (most recent call", "- zeta156^4 - zeta156^2 - 1 sage: factor(norm(e.gauss_sum())) 13^24 TESTS:", "G = [] seen_so_far = set([]) for x in v:", "113 TESTS:: sage: eps1 = DirichletGroup(5)([-1]) sage: eps2 = DirichletGroup(5,QQ)([-1])", "7 with values in Rational Field sage: G.change_ring(CyclotomicField(6)) Group of", "is_DirichletGroup(DirichletGroup(11)) True sage: is_DirichletGroup(11) False sage: is_DirichletGroup(DirichletGroup(11).0) False \"\"\" return", "QQbar, or a complex field\") zeta = CC.zeta(G.modulus()) ** a", "DirichletGroup(24)([1,-1,-1]); chi Dirichlet character modulo 24 of conductor 24 mapping", "0} \"\"\" return {z: i for i, z in enumerate(self._zeta_powers)}", "zeta4 sage: parent(val) Gaussian Integers in Cyclotomic Field of order", "Dirichlet character modulo 37506941597 of conductor 37733 mapping 13533432536 |-->", "= DirichletGroup(7,QQ); G Group of Dirichlet characters modulo 7 with", "= DirichletGroup(13) sage: e = G.0 sage: e.is_odd() True sage:", "the values of this character on each integer between 0", "36 \"\"\" return self.zeta_order() @cached_method def _automorphisms(self): \"\"\" Compute the", "__call__ method for Dirichlet characters, miscellaneous fixes - <NAME> (2014-03-06):", "Integers(15)) Group of Dirichlet characters modulo 17 with values in", "in the rational numbers:: sage: G = DirichletGroup(20, QQ); G", "root of unity in ``base_ring`` - ``zeta_order`` -- (optional) positive", "is generally much quicker). CACHING: Computed Kloosterman sums are *not*", "* z for y, z in zip(self.values_on_gens(), other.values_on_gens())) return G.element_class(G,", "1, 1, 1, 1, 1] sage: t(1).parent() Rational Field sage:", "of integers modulo 15 sage: G.order() 4 sage: DirichletGroup(-33) Traceback", "5 of conductor 5 mapping 2 |--> zeta4 sage: e.bar()", "= DirichletGroup(6) sage: G(DirichletGroup(3).0) Dirichlet character modulo 6 of conductor", "If either zeta or zeta_order is given, compute the other.", "base_ring.zeta(zeta_order) return (base_ring, modulus, zeta, zeta_order) def create_object(self, version, key,", "are admissible (see :trac:`17283`):: sage: k.<i> = CyclotomicField(4) sage: G", "G3 = DirichletGroup(31, CyclotomicField(3)) sage: G5 = DirichletGroup(31, CyclotomicField(5)) sage:", "with values in \" % self.modulus() if self._zeta is not", "return richcmp(self.values_on_gens(), other.values_on_gens(), op) def __hash__(self): \"\"\" Return the hash", "a*b # indirect doctest Dirichlet character modulo 20 of conductor", "implemented over this ring \"\"\" G = self.parent() zo =", "self. EXAMPLES:: sage: G.<a,b> = DirichletGroup(16) sage: latex(b) # indirect", "modulus. EXAMPLES:: sage: e = DirichletGroup(20)(1) sage: e.values() [0, 1,", "only on the factor of p**(r-1) on the right hand", "modulo 13 of conductor 13 mapping 2 |--> zeta12, Dirichlet", "because immutable vectors are not implemented yet. EXAMPLES:: sage: G.<a,b>", "self.base_ring() is R: return self G = self.parent().change_ring(R) return G.element_class(G,", "\\zeta_{4} TESTS: Dirichlet characters modulo 1 and 2 are printed", "mapping 3 |--> -1, Dirichlet character modulo 9 of conductor", "+ b*e**(-1)) return g def kloosterman_sum_numerical(self, prec=53, a=1, b=0): r\"\"\"", "Field of order 4 and degree 2 \"\"\" base_ring, modulus,", "e(41) -1 sage: e(37) zeta4 sage: e(31*37) -zeta4 sage: parent(e(31*37))", "gens(self): \"\"\" Returns generators of self. 
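# --- Sketch for ``gens``: the two distinguished generators below are dual to
# the unit generators 11 and 17 of (Z/20Z)^*, and every character in the
# group is a product of their powers.
from sage.all import DirichletGroup, prod

G = DirichletGroup(20)
a, b = G.gens()
print(a(11), a(17))                    # -1 1
print(b(11), b(17))                    # 1 zeta4
print(prod(G.gens(), G(1)) == a * b)   # True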
EXAMPLES:: sage: G =", "= K(sum(binomial(k,j) * bernoulli(j, **opts) * N**(j-1) * S(k-j) for", "X[1] sage: # Y is trivial and Z is quartic", "the sum exactly (which is generally slower). INPUT: - ``prec``", "2) ((-1,), (-1,), 1) ((-1,), (-zeta6,), -2*zeta6 + 3) ((-1,),", "repr(G) # indirect doctest 'Group of Dirichlet characters modulo 11", "G.gens() (Dirichlet character modulo 60 of conductor 4 mapping 31", "mod gens = G.unit_gens() orders = G.integers_mod().unit_group().gens_orders() R_values = G._zeta_powers", "when pickling an instance of :class:`DirichletCharacter`. \"\"\" P = self.parent()", "Cyclotomic Field of order 6 and degree 2 TESTS: We", "self.values_on_gens()) return G.element_class(G, x, check=False) def _repr_short_(self): r\"\"\" A short", "DirichletGroup(2, base_ring=CC) False If the base ring is not an", "self.parent() if G.zeta.is_in_cache(): x = n * self.element() else: x", "domain \"\"\" if not self.base_ring().is_integral_domain(): raise TypeError(\"Galois orbits only defined", "DirichletGroup(...)`` notation works) - ``integral`` -- boolean (default: ``False``); whether", "of Dirichlet characters modulo 5 with values in Number Field", "= K.complex_embedding(prec) CC = phi.codomain() else: raise NotImplementedError(\"Gauss sums only", "must be between 0 and %s\"%(n,len(g)-1)) return g[n] @cached_method def", "sage: DirichletGroup(60).zeta_order() 4 sage: DirichletGroup(60, GF(25,'a')).zeta_order() 4 sage: DirichletGroup(19).zeta_order() 18", "which must also be a multiple of the conductor of", "4 mapping 11 |--> -1, 17 |--> 1, Dirichlet character", "self._zeta if zeta_order is None: # We reuse _zeta_order if", "definition, the first Bernoulli number of the trivial # character", "`k \\ge 3`:: sage: (DirichletGroup(18).0).decomposition() [Dirichlet character modulo 2 of", "order 4 and degree 2' We can multiply if we're", "= self.parent().modulus() if mod == 1: return [R.one()] elif mod", "\"\"\" if v is None: v = self.list() else: if", "3 mapping 31 |--> 1, 41 |--> -1, 37 |-->", "def exponent(self): \"\"\" Return the exponent of this group. EXAMPLES::", "modulo 13 of conductor 13 mapping 2 |--> -1 sage:", "character. EXAMPLES:: sage: e = DirichletGroup(100).0 sage: e.modulus() 100 sage:", "return (self.conductor() == self.modulus()) @cached_method def is_trivial(self): r\"\"\" Returns ``True``", "d[0]*d[1] == c Traceback (most recent call last): ... TypeError:", "= DirichletGroup(20) sage: G.gen(0) Dirichlet character modulo 20 of conductor", "conductor 5 mapping 11 |--> 1, 17 |--> zeta4 sage:", "Traceback (most recent call last): ... NotImplementedError: Automorphisms for finite", "_zeta_powers(self): \"\"\" Return a list of powers of the distinguished", "defining polynomial x^4 + 1 :: sage: G.<e> = DirichletGroup(13)", "= DirichletGroup(10, QQ).base_extend(CyclotomicField(4)) sage: H = DirichletGroup(10, CyclotomicField(4)) sage: G", "GF(11^4, 'a'))._automorphisms() [1, 11, 121, 1331] sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms()", "character. This function returns an equal Dirichlet character .. MATH::", "modulo 2 of conductor 1, Dirichlet character modulo 9 of", "1, 0, 0, 1, 36, 0, 1, 36] sage: e", "- ``base_ring`` -- commutative ring; the value ring for the", "TESTS: Check that :trac:`17586` is fixed:: sage: DirichletGroup(1)[0].bernoulli(1) 1/2 \"\"\"", "of dimension 2 over Ring of integers modulo 2 \"\"\"", "Bernoulli numbers via classical Bernoulli numbers using the formula in", "etc. 
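# --- ``jacobi_sum`` sanity check, as in the doctests above: J(chi, psi)
# agrees with the defining sum over Z/NZ.
from sage.all import DirichletGroup, IntegerModRing

D = DirichletGroup(13)
g = D(1)                                   # the trivial character mod 13
print(g.jacobi_sum(g))                     # 11
print(sum(g(x) * g(1 - x) for x in IntegerModRing(13)))  # 11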
The Kloosterman sum associated to `\\chi` and the integers", "sage: c = a*b sage: d = c.decomposition(); d [Dirichlet", "20 of conductor 5 mapping 11 |--> 1, 17 |-->", "self.conductor()) r = len(self.values_on_gens()) if r != 0: s +=", "-1, 0, 0, 0, -1, 0, 1, 0, -1, 0,", "2 in Ring of integers modulo 15 sage: G.gens() (Dirichlet", "power modulus corresponding to primes dividing modulus. (Note that if", "prec bits of precision. See also :meth:`.kloosterman_sum`, which calculates the", "galois_orbits(self, v=None, reps_only=False, sort=True, check=True): \"\"\" Return a list of", "Field of order 4 and degree 2 \"\"\" g =", "z = L.one() for c in chi.values()[1:]: z *= zeta", "5 mapping 11 |--> 1, 17 |--> zeta4) \"\"\" g", "lcm, fundamental_discriminant, euler_phi, factorial, valuation) def trivial_character(N, base_ring=rings.RationalField()): r\"\"\" Return", "self.base_ring().one() return [x for x in range(self.modulus()) if self(x) ==", "of this Dirichlet group. This is the same as self.order().", "defined by `x` (type :class:`DirichletCharacter`). EXAMPLES:: sage: G.<e> = DirichletGroup(13)", "wrapper around a PARI L-function or around the ``lcalc`` program.", "= t/(e^{Nt}-1) g = t/((N*t).exp(prec) - 1) # h(n) =", "Cyclotomic Field of order 4 and degree 2 sage: DirichletGroup(20).base_ring()", "character defined by `x` (type :class:`DirichletCharacter`). EXAMPLES:: sage: G.<e> =", "self.values_on_gens()]) else: dlog = P._zeta_dlog v = M([dlog[x] for x", "= K zeta = L.zeta(m) elif number_field.is_CyclotomicField(K) or is_RationalField(K): chi", "(see :trac:`17338`):: sage: latex(DirichletGroup(1)[0]) \\hbox{Dirichlet character modulo } 1 \\hbox{", "and degree 2 \"\"\" base_ring, modulus, zeta, zeta_order = key", "free_module import sage.modules.free_module_element as free_module_element import sage.rings.all as rings import", "that if the modulus is 2 mod 4, there will", "of Dirichlet characters modulo 19 with values in Finite Field", "G.<a,b> = DirichletGroup(20) sage: a.is_trivial() False sage: (a^2).is_trivial() True \"\"\"", "of Dirichlet characters of prime power modulus, where the prime", "== f False sage: k = DirichletGroup(7)([-1]) sage: k ==", "of conductor 60 mapping 31 |--> -1, 41 |--> -1,", "return H(self) @cached_method def values(self): \"\"\" Return a list of", "range(0,r)] return Auts def galois_orbits(self, v=None, reps_only=False, sort=True, check=True): \"\"\"", "default False) if True only returns representatives for the orbits.", "map from Rational Field to Integer Ring is defined Base-extended", "1 or self.is_trivial(): return rings.Integer(1) F = factor(self.modulus()) if len(F)", "element_key in state_dict: element = state_dict[element_key] del state_dict[element_key] super(DirichletCharacter, self).__setstate__(state)", "character modulo 13 of conductor 1 mapping 2 |--> 1", "5 mapping 2 |--> zeta4 sage: e.bar() Dirichlet character modulo", "values in the group of order 4 generated by zeta4", "Dirichlet characters modulo 4 with values in Cyclotomic Field of", "CC) sage: G.0.is_odd() True Note that ``is_even`` need not be", "field of algebraic numbers is supported (:trac:`19056`):: sage: G =", "... 
ValueError: conductor(=4) must divide M(=50) \"\"\" M = int(M)", "of order 6 and degree 2 sage: (e^3).minimize_base_ring().base_ring() Cyclotomic Field", "divisibility holds # depends only on the factor of p**(r-1)", "4 mapping 3 |--> -1, Dirichlet character modulo 9 of", "CC) sage: e = G.0 sage: e.is_even() False sage: e(-1)", "raise TypeError(\"cannot convert %s to an element of %s\" %", "Field in a with defining polynomial x^4 + 1 sage:", "x = n * self.element() else: x = tuple(z**n for", "not be the negation of is_odd, e.g., in characteristic 2::", "4 sage: G.base_ring() Rational Field The elements of G print", "[R.zero(), R.one()] result_list = [R.zero()] * mod gens = G.unit_gens()", "the power of n EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage:", "self.zeta_order.set_cache(d) return zeta @cached_method def zeta_order(self): \"\"\" Return the order", "17 |--> 1 sage: G.gen(1) Dirichlet character modulo 20 of", "= DirichletGroup(4) sage: c.extend(20) Dirichlet character modulo 20 of conductor", "field or QQ.\") phi = K.complex_embedding(prec) CC = phi.codomain() g", "self. The Galois group is the absolute Galois group of", "'a'))._automorphisms() [1, 11, 121, 1331] sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5))._automorphisms() Traceback", "or of :meth:`values_on_gens`. The cache of one of these methods", "a^2, Dirichlet character modulo 5 of conductor 5 mapping 2", "list of powers of the distinguished root of unity. TESTS::", "11 |--> -1, 17 |--> 1 sage: b Dirichlet character", "following identity of power series (see for example [DI1995]_, Section", "in P._automorphisms()]) v = [P.element_class(P, m * z, check=False) for", "check: if self.parent() != char.parent(): raise NotImplementedError(\"Characters must be from", "kernel(self): r\"\"\" Return the kernel of this character. OUTPUT: Currently", "a with defining polynomial x^4 + 1 sage: DirichletGroup(5, K,", "= DirichletGroup(16)([-1, 1]) sage: e.values_on_gens () (-1, 1) .. NOTE::", "parameters yields the same object:: sage: DirichletGroup(60) is DirichletGroup(60) True", "done much # more efficiently. v = self.values() S =", "9110/13 sage: eps = DirichletGroup(9).0 sage: eps.bernoulli(3) 10*zeta6 + 4", "DirichletGroup(60)([1,-1,I]) sage: chi.conrey_number() 17 sage: chi = DirichletGroup(420)([1,-1,-I,1]) sage: chi.conrey_number()", "an error will be raised if only ``zeta_order`` is specified::", "\"\"\" modulus = rings.Integer(N) if modulus <= 0: raise ValueError('modulus", "if R.is_prime_field(): return self p = R.characteristic() if p: K", "1,), 2*zeta6 + 1) ((zeta6,), (-1,), -2*zeta6 - 1) ((zeta6,),", "mapping 51 |--> -1, 77 |--> 1 sage: e.conductor() 4", "self. EXAMPLES:: sage: DirichletGroup(37).unit_gens() (2,) sage: DirichletGroup(20).unit_gens() (11, 17) sage:", "base_ring, modulus, zeta, zeta_order): \"\"\" Create a Dirichlet group. Not", "n = random.randrange(g.order()) e *= g**n return e def unit_gens(self):", "s def base_ring(self): \"\"\" Returns the base ring of this", "its conductor equals its modulus. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20)", "14 \"\"\" if not (isinstance(R, Map) or R.has_coerce_map_from(self.base_ring())): raise TypeError(\"no", "admitting a *coercion* map from the base ring of ``self``,", "(-1,), -1) ((1,), (-zeta6,), -1) ((1,), (-zeta6 + 1,), -1)", "between 0 and the order of the generator minus 1,", "modulus is 2 mod 4, there will be a \"factor\"", "a (not necessarily primitive) character of modulus `N`. 
This function", "supported (:trac:`19056`):: sage: G = DirichletGroup(7, QQbar) sage: G[1].gauss_sum() -2.440133358345538?", "case of the trivial Dirichlet character modulo 1, this function", "= P.zeta() zeta_argument = zeta.argument() v = M([int(round(x.argument() / zeta_argument))", "pari_orders)] return (G, v) def conrey_number(self): r\"\"\" Return the Conrey", "and the caching would be broken:: sage: k = k[1:];", "sage: g.jacobi_sum(g) 3 We consider a sum with values in", "Returns ``True`` if this is the trivial character, i.e., has", "state_dict[element_key] super(DirichletCharacter, self).__setstate__(state) if values_on_gens is not None: self.values_on_gens.set_cache(values_on_gens) if", "\"\"\" Return a print representation of this group, which can", "V` with pointwise multiplication. The group `V` is determined as", "if True only returns representatives for the orbits. - ``sort``", "\"\"\" Create a Dirichlet group. Not to be called directly", "n in range(1,N+1)] ber = sum([self(a)*h[a][k] for a in range(1,N+1)])", "Many operations, such as finding a set of generators for", "character modulo 20 of conductor 5 mapping 11 |--> 1,", "sage: chi._pari_conversion() ([[24, [0]], [8, [2, 2, 2], [7, 13,", "0; 0, 1, 0; 0, 0, 1]], [0, 1, 1])", "= phi.codomain() g = 0 m = G.modulus() zeta =", "e.gauss_sum(1) 2*zeta6 - 1 sage: e.gauss_sum(2) -2*zeta6 + 1 sage:", "= self.parent() K = G.base_ring() chi = self m =", "kloosterman_sum(self, a=1, b=0): r\"\"\" Return the \"twisted\" Kloosterman sum associated", "zeta, zeta_order) def create_object(self, version, key, **extra_args): \"\"\" Create the", "1 sage: DirichletGroup(5, K, zeta_order=2) Group of Dirichlet characters modulo", "characters modulo 5 with values in Finite Field of size", "definition directly. .. WARNING:: In the case of the trivial", "= F[0][0] # When p is odd, and x =/=", "no coercion map from Rational Field to Integer Ring is", "conductor 5 mapping 31 |--> 1, 41 |--> 1, 37", "None, None) sage: k == l True sage: DirichletGroup(2, base_ring=QQ)", "automorphisms of self. These are always given by raising to", "G = DirichletGroup(3) sage: e = G.0 sage: abs(e.gauss_sum_numerical()) 1.7320508075...", "conductor 13 mapping 2 |--> zeta12^2 sage: e.galois_orbit() [Dirichlet character", "= H.0 sage: e.gauss_sum_numerical() -3.07497205... + 1.8826966926...*I sage: f.gauss_sum_numerical() -3.07497205...", "= G.0 sage: e.bernoulli(5) 7430/13*zeta12^3 - 34750/13*zeta12^2 - 11380/13*zeta12 +", "- 1: return self.values()[m] else: return self.__eval_at_minus_one() def change_ring(self, R):", "x in IntegerModRing(N)]) 11 And sums where exactly one character", "ber def lfunction(self, prec=53, algorithm='pari'): \"\"\" Return the L-function of", "= DirichletGroup(16) sage: latex(b) # indirect doctest \\hbox{Dirichlet character modulo", "bits of precision. INPUT: - ``prec`` -- integer (default: 53),", "DirichletGroup(35).objgens() sage: e = x[0]*x[1]; e Dirichlet character modulo 35", "``G.<...> = DirichletGroup(...)`` notation works) - ``integral`` -- boolean (default:", "/ zeta_argument)) for x in self.values_on_gens()]) else: dlog = P._zeta_dlog", "= DirichletGroup(19, GF(p)); g Group of Dirichlet characters modulo 19", "Dirichlet characters modulo 1 and 2 are printed correctly (see", "`R`. 
In this case, `R` must be a domain (so", "Number Field in a with defining polynomial x^4 + 1", "+ zeta156^5 - zeta156^4 - zeta156^2 - 1 sage: factor(norm(e.gauss_sum()))", "import sage.misc.prandom as random import sage.modules.free_module as free_module import sage.modules.free_module_element", "be between 0 and %s\"%(n,len(g)-1)) return g[n] @cached_method def gens(self):", "0: raise ValueError(\"M(=%s) must divide the modulus(=%s)\"%(M,self.modulus())) if M%self.conductor() !=", "+= ' mapping ' for i in range(r): if i", "# **************************************************************************** # Copyright (C) 2004-2006 <NAME> <<EMAIL>> # Copyright", "conductor of this character. EXAMPLES:: sage: e = DirichletGroup(100).0 sage:", "e.modulus() 100 sage: e.conductor() 4 sage: e.restrict(20) Dirichlet character modulo", "of ``zeta`` EXAMPLES:: sage: G = DirichletGroup(7,QQ); G Group of", "= L.one() for c in chi.values()[1:]: z *= zeta g", "sage: e.kloosterman_sum(3,5) -2*zeta6 + 1 sage: G = DirichletGroup(20) sage:", "in characteristic p. EXAMPLES:: sage: G = DirichletGroup(13) sage: e", ":trac:`25127` is fixed:: sage: G = DirichletGroup(1) sage: chi =", "mapping 2 |--> 1], ..., [Dirichlet character modulo 13 of", "last): ... IndexError: n(=2) must be between 0 and 1", "chi = self m = G.modulus() if is_ComplexField(K): return self.gauss_sum_numerical(a=a)", "x = DirichletGroup(35).objgens() sage: e = x[0]*x[1]; e Dirichlet character", "sage: len(DirichletGroup(20, QQ)) 4 sage: len(DirichletGroup(20, GF(5))) 8 sage: len(DirichletGroup(20,", "is conversion between Dirichlet groups of different moduli, but no", "orbits only defined if base ring is an integral domain", "chi(1) 1 \"\"\" G = self.parent() R = G.base_ring() mod", "Group of Dirichlet characters modulo 60 with values in Gaussian", "= G.integers_mod().one() value = val_on_gen.base_ring().zero() while True: # record character", "60 with values in the group of order 4 generated", "== R.one(): x = [R.one()] * len(self.unit_gens()) except (TypeError, ValueError,", "restrict(self, M): \"\"\" Returns the restriction of this character to", "MATH:: J(\\chi, \\psi) = \\sum_{a \\in \\ZZ / N\\ZZ} \\chi(a)", "and the character is nontrivial, then the Gauss sum has", "you call this (unless `m` equals -1) EXAMPLES:: sage: G", "where we want the multiplication to take place. :: sage:", "field:: sage: g.zeta_order() 2 :: sage: r4 = CyclotomicField(4).ring_of_integers() sage:", "... TypeError: no coercion map from Rational Field to Integer", "def decomposition(self): r\"\"\" Returns the Dirichlet groups of prime power", "mapping 11 |--> -1, 17 |--> -1] Next we construct", "the smallest p**r such that # Order(x) divides EulerPhi(p**r) =", "``definition`` algorithm uses the definition directly. .. WARNING:: In the", "modulo %s of conductor %s' % (self.modulus(), self.conductor()) r =", "cache of :meth:`element` or of :meth:`values_on_gens`. The cache of one", "31 |--> 1, 41 |--> -1, 37 |--> 1, Dirichlet", "is trivial for `k = 1` and non-cyclic for `k", "DirichletGroup(20).exponent() 4 sage: DirichletGroup(20,GF(3)).exponent() 2 sage: DirichletGroup(20,GF(2)).exponent() 1 sage: DirichletGroup(37).exponent()", "character modulo 5 of conductor 5 mapping 2 |--> -zeta4]", "last): ... ValueError: conductor(=4) must divide M(=50) \"\"\" M =", "(most recent call last): ... 
TypeError: cannot convert 0 to", "CC.coerce_map_from(K) elif number_field.is_CyclotomicField(K) or is_RationalField(K): phi = K.complex_embedding(prec) CC =", "else: if check: v = [self(x) for x in v]", "= DirichletGroup(20) sage: G.modulus() 20 \"\"\" return self._modulus def ngens(self):", "free_module_element.is_FreeModuleElement(x): self.element.set_cache(x) else: self.values_on_gens.set_cache(x) @cached_method def __eval_at_minus_one(self): r\"\"\" Efficiently evaluate", "- zeta156^33 - zeta156^31 + 2*zeta156^30 + zeta156^28 - zeta156^24", "algorithm == 'pari': from sage.lfunctions.pari import lfun_character, LFunction Z =", "X.modulus() and self.base_ring().has_coerce_map_from(X.base_ring()) and (self._zeta is None or (X._zeta is", "action of the absolute Galois group of the prime subfield", "self.decomposition() val = self.base_ring()(1) for e in D: if e.modulus()", "g def kloosterman_sum_numerical(self, prec=53, a=1, b=0): r\"\"\" Return the Kloosterman", "0, 1, 0, 0, 0, 1, 0, 1, 0, 1,", "dimension 2 over Ring of integers modulo 2 \"\"\" return", "37 of conductor 37 mapping 2 |--> zeta36^4 sage: DirichletGroup(20).random_element()", "x = tuple(y * z for y, z in zip(self.values_on_gens(),", "of different moduli, but no coercion. This implies that Dirichlet", "e.kloosterman_sum(7,17) -2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4 sage: G", "@cached_method def decomposition(self): r\"\"\" Returns the Dirichlet groups of prime", "L PARI L-function associated to Dirichlet character modulo 20 of", "order 4 and degree 2 \"\"\" g = rings.IntegerModRing(self.modulus()).unit_group_exponent() if", "DirichletGroup(5)([-1]) sage: eps2 = DirichletGroup(5,QQ)([-1]) sage: eps1.conrey_number() == eps2.conrey_number() True", "R(1)) < 0.5 return self(-1) == R(1) @cached_method def is_odd(self):", "jacobi_sum(self, char, check=True): r\"\"\" Return the Jacobi sum associated to", "== DirichletGroup(13, QQ) False \"\"\" from sage.categories.groups import Groups category", "sage: DirichletGroup(5)._zeta_dlog {-1: 2, -zeta4: 3, zeta4: 1, 1: 0}", "positive integer coprime to q that identifies a Dirichlet character", "EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletGroup sage: is_DirichletGroup(DirichletGroup(11)) True sage:", "is a map (:trac:`18072`):: sage: K.<i> = QuadraticField(-1) sage: f", "n-th generator of self. EXAMPLES:: sage: G = DirichletGroup(20) sage:", "character as an approximate complex number with prec bits of", "a pickle element_key = '_DirichletCharacter__element' element = None if element_key", "2 |--> zeta12^2 sage: e.galois_orbit() [Dirichlet character modulo 13 of", "self.base_ring() e = self._integers.unit_group_exponent() for d in reversed(e.divisors()): try: zeta", "then `V` is taken to be the cyclic subgroup of", "orders[i]) g.append(self.element_class(self, z, check=False)) return tuple(g) def integers_mod(self): r\"\"\" Returns", "GF(p)); g Group of Dirichlet characters modulo 19 with values", "in ``R`` - ``zeta_order`` -- (optional) order of ``zeta`` EXAMPLES::", "order of element not known sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)", "the same as self.order(). EXAMPLES:: sage: len(DirichletGroup(20)) 8 sage: len(DirichletGroup(20,", "sage: e.values() [0, 1, 0, -zeta4, 0, 0, 0, zeta4,", "... 
NotImplementedError: Characters must be from the same Dirichlet Group.", "if zeta is not None: zeta = base_ring(zeta) if zeta_order", "|--> 1 sage: e.restrict(4) Dirichlet character modulo 4 of conductor", "sage: type(G(1).conductor()) <type 'sage.rings.integer.Integer'> \"\"\" if self.modulus() == 1 or", "DirichletGroup(31, CyclotomicField(5)) sage: K30 = CyclotomicField(30) sage: G3.gen(0).base_extend(K30) * G5.gen(0).base_extend(K30)", "= DirichletGroup(4).gen() sage: chi4._pari_conversion() ([[4, [0]], [2, [2], [3]], [[2]~,", "2 |--> 1] sage: (DirichletGroup(72).0).decomposition() [Dirichlet character modulo 8 of", "generators of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.ngens()", "Integers(9)(2)).0 sage: chi.galois_orbit() Traceback (most recent call last): ... TypeError:", "not a domain, an error will be raised. EXAMPLES:: sage:", "that ``zeta_order`` is an element of ``ZZ``. TESTS:: sage: G", "multiple M of the modulus. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20)", "character modulo } 2 \\hbox{ of conductor } 1 \"\"\"", "def lfunction(self, prec=53, algorithm='pari'): \"\"\" Return the L-function of ``self``.", "11 |--> -1, 17 |--> 1' TESTS: Dirichlet characters modulo", "13 of conductor 13 mapping 2 |--> -zeta12^2 + 1]", "supported (:trac:`19056`):: sage: G = DirichletGroup(7, QQbar) sage: G[1].gauss_sum_numerical() -2.44013335834554", "``zeta`` cannot be found. EXAMPLES: The default base ring is", "integers as the base ring. This is ignored if ``base_ring``", "= r'\\hbox{Dirichlet character modulo } %s \\hbox{ of conductor }", "1/2 \"\"\" if cache: try: self.__bernoulli except AttributeError: self.__bernoulli =", "characters in self, or in v if v is not", "Dirichlet characters modulo 20 with values in Rational Field sage:", "Group of Dirichlet characters modulo 2 with values in Complex", "Open the LMFDB web page of the character in a", "-2.440133358345538? + 1.022618791871794?*I Check that :trac:`19060` is fixed:: sage: K.<z>", "`n`-torsion subgroup, where `n` is the exponent of `(\\ZZ/N\\ZZ)^*`. Many", "loads(e.dumps()) == e True :: sage: G, x = DirichletGroup(35).objgens()", "\"\"\" d = rings.Integer(d) if d == 0: raise ValueError(\"d", "this is called OUTPUT: Let `\\varepsilon` be a (not necessarily", "<<EMAIL>> # # This program is free software: you can", "'sage.rings.integer.Integer'> \"\"\" if self.modulus() == 1 or self.is_trivial(): return rings.Integer(1)", "the modulus(=%s)\"%(M,self.modulus())) if M%self.conductor() != 0: raise ValueError(\"conductor(=%s) must divide", "G.unit_gens() orders = G.integers_mod().unit_group().gens_orders() R_values = G._zeta_powers val_on_gen = self.element()", "not implemented ' 'over this ring') n = zeta.multiplicative_order() zeta", "i)) w.append(a) else: for i in range(1, zeta_order): a =", "Automorphisms for finite non-field base rings not implemented \"\"\" n", "last): ... ValueError: values (= (zeta16^4, -1, -1)) must have", "in the cPickle module -- # see modsym/manin_symbols.py. 
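# Added sketch (not part of the original code): a minimal pickling
# round-trip, mirroring the TESTS quoted elsewhere in this file:
#   sage: G = DirichletGroup(13)
#   sage: loads(G.dumps()) == G
#   True
#   sage: e = G.0
#   sage: loads(e.dumps()) == e
#   True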
G =", "= f.base_extend(Integers(15)) sage: g(3) 14 sage: g.parent().zeta() 14 \"\"\" if", "(a*b).is_primitive() True \"\"\" return (self.conductor() == self.modulus()) @cached_method def is_trivial(self):", "base rings not implemented sage: DirichletGroup(17, Integers(9), zeta=Integers(9)(2))._automorphisms() Traceback (most", "respectively\" .format(x, orders)) self.values_on_gens.set_cache(x) else: if free_module_element.is_FreeModuleElement(x): self.element.set_cache(x) else: self.values_on_gens.set_cache(x)", "15 \\mapsto 1,\\ 5 \\mapsto \\zeta_{4} TESTS: Dirichlet characters modulo", "of Dirichlet characters - ``x`` -- one of the following:", "return self.values()[m] else: return self.__eval_at_minus_one() def change_ring(self, R): \"\"\" Return", "20 of conductor 4 mapping 11 |--> -1, 17 |-->", "+ 2*zeta12 - 3 sage: f.jacobi_sum(e) 3*zeta12^2 + 2*zeta12 -", "which gives an inexact answer (but is generally much quicker).", "- zeta30 - 1 When a root of unity is", "sage: eps = DirichletGroup(9).0 sage: eps.bernoulli(3) 10*zeta6 + 4 sage:", "0, 1, 36, 0, 1, 0, 0, 1, 36, 0,", "of a suitable cyclotomic field; see also :meth:`.kloosterman_sum_numerical`, which gives", "pari_gens) # now compute the input for pari (list of", "1] sage: (DirichletGroup(72).0).decomposition() [Dirichlet character modulo 8 of conductor 4", "EXAMPLES:: sage: chi4 = DirichletGroup(4).gen() sage: chi4._pari_conversion() ([[4, [0]], [2,", "|--> zeta4) \"\"\" g = [] ord = self.zeta_order() M", "value of this character at the integer `m`. .. warning::", "self.integers_mod().unit_group().gens_orders() for i in range(len(self.unit_gens())): z = zero.__copy__() z[i] =", "This includes Gauss sums, classical Kloosterman sums, Salié sums, etc.", "function returns `B_{1,\\varepsilon} = 1/2`, in accordance with the above", "g = L(chi(0)) z = L.one() for c in chi.values()[1:]:", "the former case, it also ensures that ``zeta`` is an", "in Ring of integers modulo 15 sage: G.gens() (Dirichlet character", "= D(1) sage: g.jacobi_sum(g) 11 sage: sum([g(x)*g(1-x) for x in", "be an integral domain if only zeta_order is specified sage:", "= zeta.multiplicative_order() zeta = zeta**(n // m) for c in", "This may change. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.kernel()", "Rational Field sage: G.change_ring(CyclotomicField(6)) Group of Dirichlet characters modulo 7", "this ring') n = zeta.multiplicative_order() zeta = zeta**(n // m)", "recent call last): ... NotImplementedError: Kloosterman sums not implemented over", "_zeta_order if we know that it stays the # same;", "the trivial character, i.e., has order 1. 
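For instance (an added sketch; ``trivial_character`` is the module-level helper defined in this file), the trivial character is 1 on units and 0 on non-units::

    sage: t = trivial_character(7)
    sage: t(3), t(7)
    (1, 0)
    sage: t.is_trivial()
    True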
EXAMPLES:: sage: G.<a,b>", "check=False)) return tuple(g) def integers_mod(self): r\"\"\" Returns the group of", "sage: repr(a) # indirect doctest 'Dirichlet character modulo 20 of", "9 of conductor 1 mapping 2 |--> 1] sage: (DirichletGroup(72).0).decomposition()", "= a.lfunction(algorithm='lcalc'); L L-function with complex Dirichlet coefficients sage: L.value(4)", "Auts = [p**m for m in range(0,r)] return Auts def", "seen_so_far.add(tuple(z.element())) G = Sequence(G, cr=True) if sort: G.sort() return G", "sage: f = K.complex_embeddings()[0] sage: psi = chi.change_ring(f) sage: psi(2)", "if the new base ring is not an integral domain::", "1, 17 |--> -1 \"\"\" G = self.parent() if G.zeta.is_in_cache():", "b.conductor() 5 sage: (a*b).conductor() 20 TESTS:: sage: G.<a, b> =", "Galois group is the absolute Galois group of the prime", "zeta_order=None, names=None, integral=False): \"\"\" Create a key that uniquely determines", "3 mapping 5 |--> -1 sage: G(DirichletGroup(15).1) Traceback (most recent", "|--> 1 sage: L(4) 0.988944551741105 With the algorithm \"lcalc\":: sage:", "K.gen(), 4)) Group of Dirichlet characters modulo 60 with values", "[0] * len(orders) n = G.integers_mod().one() value = val_on_gen.base_ring().zero() while", "is not None: zeta = base_ring(zeta) if zeta_order is None:", "2 |--> -zeta12^2 + 1] A non-example:: sage: chi =", "R = rings.PowerSeriesRing(rings.QQ, 't') t = R.gen() # g(t) =", "G.list(); Y = X[0]; Z = X[1] sage: # Y", "DirichletGroup factory. p = R.characteristic() if p == 0 or", "still works if the new base ring is not an", "distinguished set of generators. category = category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring, category=category)", "zeta30^4 + zeta30^3 - zeta30 - 1 When a root", "zeta.argument() v = M([int(round(x.argument() / zeta_argument)) for x in self.values_on_gens()])", "Create a Dirichlet character with specified values on generators of", "G.0 sage: e.gauss_sum() -zeta156^46 + zeta156^45 + zeta156^42 + zeta156^41", "sage: R.<x> = PolynomialRing(QQ) sage: K.<a> = NumberField(x^4 + 1)", "H = DirichletGroup(13, CC) sage: e = G.0 sage: f", "[Coh2007]_, Proposition 9.4.5; this is usually optimal. The ``definition`` algorithm", "character mod `p^n`, where `p` is a prime. Then `\\varepsilon(-1)", "`\\varepsilon(-1) = 1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e", "16 \\hbox{ of conductor } 16 \\hbox{ mapping } 15", "def _zeta_powers(self): \"\"\" Return a list of powers of the", "1, 7 |--> zeta4] Another example:: sage: G = DirichletGroup(13)", "DirichletGroup(13) sage: e = G.0 sage: e.is_even() False sage: e(-1)", "with prec bits of precision. See also :meth:`.kloosterman_sum`, which calculates", "\\in \\ZZ / N\\ZZ} \\chi(a) \\psi(1-a) where `\\chi` and `\\psi`", "if self.element.is_in_cache(): return not self.element() one = self.base_ring().one() return all(x", "the base ring. EXAMPLES:: sage: DirichletGroup(20).zeta_order() 4 sage: DirichletGroup(60).zeta_order() 4", "sum J(Y, Z). sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y) -1 -1 \"\"\" if", "this Dirichlet character. EXAMPLES:: sage: G.<a> = DirichletGroup(11) sage: b", "field. It's the identity function in characteristic p. 
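An added sketch of that behaviour: a power of the generator modulo 13 of order 2 takes values `\pm 1`, so minimizing the base ring lands in `\QQ`::

    sage: e = DirichletGroup(13).0
    sage: (e^6).minimize_base_ring().base_ring()
    Rational Field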
EXAMPLES:: sage:", "r\"\"\" Create a Dirichlet character with specified values on generators", "17 |--> 1 sage: a Dirichlet character modulo 20 of", "0, 1, -1, 0, 0, -1, 0, 1, -1, 0,", "41 |--> -1, 37 |--> 1, Dirichlet character modulo 60", "canonical coercion:: sage: e = DirichletGroup(5, QQ).0 sage: f =", "entries of the returned vector; this vector is mutable *only*", "and self.modulus() == X.modulus() and self.base_ring().has_coerce_map_from(X.base_ring()) and (self._zeta is None", "of `(\\ZZ/N\\ZZ)^*`, where `N` is the modulus. EXAMPLES:: sage: e", "for i in self.element()]) @cached_method(do_pickle=True) def element(self): r\"\"\" Return the", "number of the trivial # character is 1/2, in contrast", "order 12 and degree 4 sage: e.minimize_base_ring().base_ring() Cyclotomic Field of", "list of ring elements: the values of the Dirichlet character", "2) ((zeta6 - 1,), (-1,), 2*zeta6 + 1) ((zeta6 -", "n): \"\"\" Return self raised to the power of n", "D = DirichletGroup(5, K) sage: D.change_ring(f) Group of Dirichlet characters", "character. This includes Gauss sums, classical Kloosterman sums, Salié sums,", "3) ((-1,), (-zeta6 + 1,), 2*zeta6 - 3) ((-zeta6,), (-zeta6,),", "sage: eps1 = DirichletGroup(5)([-1]) sage: eps2 = DirichletGroup(5,QQ)([-1]) sage: eps1.conrey_number()", "prec bits of precision. INPUT: - ``prec`` -- integer (default:", "consider a sum with values in a finite field:: sage:", "character value on n result_list[n] = R_values[value] # iterate: #", "2 \"\"\" N = self.modulus() m = m % N", "not units mod 22. while x.modulus().gcd(v) != 1: v +=", "that :trac:`25127` is fixed:: sage: G = DirichletGroup(1) sage: chi", "primitive_character(self): \"\"\" Returns the primitive character associated to self. EXAMPLES::", "(DirichletGroup(36).0).decomposition() [Dirichlet character modulo 4 of conductor 4 mapping 3", "2 over Ring of integers modulo 2 \"\"\" return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()),", "Z). sage: Y.jacobi_sum(Z); Z.jacobi_sum(Y) -1 -1 \"\"\" if check: if", "When p is odd, and x =/= 1, the conductor", "characters modulo 5 with values in the group of order", "# This program is free software: you can redistribute it", "17 |--> 1] ] sage: DirichletGroup(17, Integers(6), zeta=Integers(6)(5)).galois_orbits() Traceback (most", "The following code is pretty fast, at least compared to", "... TypeError: Unable to coerce zeta12 to a rational We", "Valuation(Order(x),p)+1. 
cond = p**(valuation(self.order(),p) + 1) if p == 2", "if self.modulus() == 1 or self.is_trivial(): return rings.Integer(1) F =", "only defined if base ring is an integral domain\") k", "\"\"\" G = pari.znstar(self.modulus(), 1) pari_orders = G[1][1] pari_gens =", "10 and degree 4 sage: G = DirichletGroup(11, RationalField()) sage:", "\"\"\" Return the number of elements of this Dirichlet group.", "cyclotomic field `\\QQ(\\zeta_n)`, where `n` is the exponent of `(\\ZZ/N\\ZZ)^*`)", "self.base_ring()(X._zeta) in self._zeta_powers))) def __len__(self): \"\"\" Return the number of", "equals this one, but over as small a subfield (or", "must have additive orders dividing {}, respectively\" .format(x, parent.zeta_order(), orders))", "def kloosterman_sum(self, a=1, b=0): r\"\"\" Return the \"twisted\" Kloosterman sum", "prod import sage.misc.prandom as random import sage.modules.free_module as free_module import", "of integers `\\ZZ/N\\ZZ` where `N` is the modulus of self.", "(default: 53), *bits* of precision - ``a`` -- integer, as", "m in range(0,r)] return Auts def galois_orbits(self, v=None, reps_only=False, sort=True,", "False sage: (a^2).is_trivial() True \"\"\" if self.element.is_in_cache(): return not self.element()", "(self.base_ring(), R)) return self.change_ring(R) def _element_constructor_(self, x): \"\"\" Construct a", "factor(self.modulus())], cr=True, universe = cat.Objects()) def exponent(self): \"\"\" Return the", "rings not implemented\") # The automorphisms in characteristic p are", "|--> -zeta4 AUTHORS: - <NAME> (2005-09-02): Fixed bug in comparison", "zeta ** int(a*e + b*(e**(-1))) g += phi(self(c))*z return g", "-1 \"\"\" G = self.parent() if G.zeta.is_in_cache(): x = self.element()", "l (2, None, None) sage: k == l True sage:", "zeta is not None: zeta = R(zeta) if isinstance(R, Map):", "|--> 1, 37 |--> 1, Dirichlet character modulo 60 of", "the conductor of this character. EXAMPLES:: sage: e = DirichletGroup(100).0", "additive orders dividing {}, respectively\" .format(x, parent.zeta_order(), orders)) self.element.set_cache(x) else:", "return DirichletGroup_class(base_ring, modulus, zeta, zeta_order) DirichletGroup = DirichletGroupFactory(\"DirichletGroup\") def is_DirichletGroup(x):", "Dirichlet characters modulo 11 with values in Cyclotomic Field of", "a list. This may change. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20)", "the base ring of ``self`` as its domain - ``zeta``", "still be None). zeta_order = self._zeta_order # Map zeta to", "(11, 17) \"\"\" return self._integers.unit_gens() @cached_method def zeta(self): \"\"\" Return", "given base ring. EXAMPLES:: sage: t = trivial_character(7) sage: [t(x)", "G = DirichletGroup(5); X = G.list(); Y = X[0]; Z", "1 sage: DirichletGroup(2)[0] Dirichlet character modulo 2 of conductor 1", "the distinguished root of unity. 
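A small added consistency sketch, which follows directly from how the table is built: it has exactly ``zeta_order()`` entries::

    sage: G = DirichletGroup(5)
    sage: len(G._zeta_powers) == G.zeta_order()
    True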
TESTS:: sage: DirichletGroup(5)._zeta_powers [1, zeta4,", "def __len__(self): \"\"\" Return the number of elements of this", "parent.zeta_order(), orders)) self.element.set_cache(x) else: R = parent.base_ring() x = tuple(map(R,", "EXAMPLES:: sage: e = DirichletGroup(7, QQ).0 sage: f = e.change_ring(QuadraticField(3,", "e True TESTS:: sage: G = DirichletGroup(10) sage: TestSuite(G[1]).run() It", "r\"\"\" Returns the minimal generators for the units of `(\\ZZ/N\\ZZ)^*`,", "not R.is_exact(): return abs(self(-1) - R(-1)) < 0.5 return self(-1)", "Field of order 4 and degree 2' We can multiply", "are printed correctly (see :trac:`17338`):: sage: latex(DirichletGroup(1)[0]) \\hbox{Dirichlet character modulo", "= DirichletGroup(3) sage: e = G.0 sage: abs(e.gauss_sum_numerical()) 1.7320508075... sage:", "1, 1, 1] sage: t(1).parent() Rational Field sage: trivial_character(7, Integers(3))(1).parent()", "G = self.parent() K = G.base_ring() chi = self m", "a part of the key, the keys would compare equal", "- 2*E(5)^4 sage: G = DirichletGroup(12, QQbar) sage: e =", "of order 8 generated by a in Number Field in", "since it computes the same thing, but requires # no", "mapping 2 |--> -a^2] We can also restrict the order", "passed to the :func:`bernoulli` function if this is called OUTPUT:", "values in Gaussian Integers in Cyclotomic Field of order 4", "modulo 7 with values in Rational Field sage: H =", "sage: e = DirichletGroup(20)(1) sage: e.values() [0, 1, 0, 1,", "DirichletGroup(5); X = G.list(); Y = X[0]; Z = X[1]", "exponent of `(\\ZZ/N\\ZZ)^*`. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20,QQ) sage: b.maximize_base_ring()", "`V` can be found. - If ``zeta`` is specified, then", "Gauss sums are *not* cached with this character. EXAMPLES:: sage:", "sums are being calculated correctly:: sage: N = 13 sage:", "that uniquely determines a Dirichlet group. TESTS:: sage: DirichletGroup.create_key(60) (Cyclotomic", "G.<a,b> = DirichletGroup(20) sage: L = a.lfunction(); L PARI L-function", "lcm(m, G.zeta_order()) L = rings.CyclotomicField(n) zeta = L.gen(0) ** (n", "zeta = zeta**(n // m) for c in m.coprime_integers(m): e", "2 |--> 1, Dirichlet character modulo 5 of conductor 5", "and degree 2 sage: (e^12).minimize_base_ring().base_ring() Rational Field TESTS: Check that", "sage: G.gens() (Dirichlet character modulo 60 of conductor 4 mapping", "* N**(j-1) * S(k-j) for j in range(k+1))) elif algorithm", "be broken:: sage: k = k[1:]; k (2, None, None)", ":meth:`values_on_gens`. The cache of one of these methods needs to", "arguments; not used directly, but passed to the :func:`bernoulli` function", "5 mapping 2 |--> -zeta4] \"\"\" return self._list_from_iterator() def modulus(self):", "len(orders))) if free_module_element.is_FreeModuleElement(x): x = parent._module(x) if any(u * v", "The ``definition`` algorithm uses the definition directly. .. WARNING:: In", "if not R.is_exact(): return abs(self(-1) - R(-1)) < 0.5 return", "-2*E(5) - 4*E(5)^2 - 4*E(5)^3 - 2*E(5)^4 sage: G =", "D = DirichletGroup(N) sage: g = D(1) sage: g.jacobi_sum(g) 11", "sum associated to this Dirichlet character as an approximate complex", "base_ring = rings.CyclotomicField(e) if integral: base_ring = base_ring.ring_of_integers() if not", "a multiple of the modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring()) return", "Frac(R). 
If R is not a domain, an error will", "(list of exponents) P = self.parent() if is_ComplexField(P.base_ring()): zeta =", "domain if only zeta_order is specified\" % base_ring) zeta_order =", "+ zeta156^6 + zeta156^5 - zeta156^4 - zeta156^2 - 1", "base_extend(self, R): \"\"\" Return the base extension of ``self`` to", "table of values of the character is made the first", "is nontrivial, then the Gauss sum has absolute value `\\sqrt{p}`.", "modulo 5 of conductor 1 mapping 2 |--> 1, Dirichlet", "[1, matrix(0,2)], [2, Mat([2, 1])]], [1, 0, 0; 0, 1,", "self. This is the same as len(self). EXAMPLES:: sage: DirichletGroup(20).order()", "Traceback (most recent call last): ... TypeError: conductor must divide", "multiple of the modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring()) return H(self)", "if base_ring is None: if not (zeta is None and", "L-function of ``self``. The result is a wrapper around a", "modulo 13 of conductor 13 mapping 2 |--> -1] ]", "a in range(1,N+1)]) * factorial(k) else: raise ValueError(\"algorithm = '%s'", "DirichletGroup(100).1 sage: e.order() # same as multiplicative_order, since group is", "over `\\ZZ/e\\ZZ`, where `e` is the order of the standard", "f.parent() Group of Dirichlet characters modulo 7 with values in", "....: for i in range(p-1) for j in range(i, p-1)]", "non-example:: sage: chi = DirichletGroup(7, Integers(9), zeta = Integers(9)(2)).0 sage:", "group, are only implemented if `V` is cyclic and a", "the map `\\ZZ/N\\ZZ \\to R` obtained by sending those `x\\in\\ZZ/N\\ZZ`", "past # we need to set the cache of values_on_gens()", "base_ring.is_finite(): # The group of n-th roots of unity in", "of self. EXAMPLES:: sage: G = DirichletGroup(20) sage: G.integers_mod() Ring", "of integers. At present this is only implemented if the", "conductor 13 mapping 2 |--> zeta12^3 - zeta12, Dirichlet character", "zeta10^2] TESTS: Test that :trac:`11783` and :trac:`14368` are fixed:: sage:", "G = DirichletGroup(19, GF(5)) sage: loads(G.dumps()) == G True We", "- 1 When a root of unity is specified, base", "sage: G.<a> = DirichletGroup(11) sage: b = copy(a) sage: a", "2 |--> zeta12 sage: e.galois_orbit() [Dirichlet character modulo 13 of", "zeta_order=zeta_order) def base_extend(self, R): \"\"\" Return the base extension of", "of order 6 and degree 2 Note that the root", "the Gauss sum has absolute value `\\sqrt{p}`. CACHING: Computed Gauss", "of conductor 5 mapping 2 |--> 2,) TESTS: Dirichlet groups", "sage: DirichletGroup(60, GF(25,'a')).zeta_order() 4 sage: DirichletGroup(19).zeta_order() 18 \"\"\" order =", "if self.parent().zeta.is_in_cache(): return self.element().additive_order() return lcm([z.multiplicative_order() for z in self.values_on_gens()])", "in Ring of integers modulo 15 sage: G.order() 4 sage:", "vals[1] elif self.modulus() % 4 == 2: # 0 factors", "1, -1] sage: e = DirichletGroup(21, base_ring=GF(37)).gen(0) ; e.values() [0,", "norm(e.gauss_sum()) 3 :: sage: G = DirichletGroup(13) sage: e =", "not implemented over this ring \"\"\" G = self.parent() zo", "1) pari_orders = G[1][1] pari_gens = G[1][2] # one should", "5 of conductor 5 mapping 2 |--> zeta4, Dirichlet character", "sage: e.base_ring() Cyclotomic Field of order 12 and degree 4", "(2, 16, 2), respectively sage: from sage.modular.dirichlet import DirichletCharacter sage:", "Done! return result_list value += val_on_gen[i] n *= gens[i] if", "or is_RationalField(K): chi = chi.minimize_base_ring() n = lcm(m, G.zeta_order()) L", "def level(self): \"\"\" Synonym for modulus. 
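Since ``level`` is just an alias, the following added sketch holds for any character::

    sage: e = DirichletGroup(100, QQ).0
    sage: e.level() == e.modulus()
    True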
EXAMPLES:: sage: e =", "TestSuite(G[1]).run() It is checked that the orders of the elements", "= DirichletGroup(13) sage: loads(G.dumps()) == G True :: sage: G", "must also be a multiple of the conductor of this", "values_on_gens = (self(x) for x in pari_gens) # now compute", "or QQ.\") phi = K.complex_embedding(prec) CC = phi.codomain() g =", "DirichletGroup(10) sage: TestSuite(G[1]).run() It is checked that the orders of", "|--> 1' TESTS: Dirichlet characters modulo 1 and 2 are", "in Cyclotomic Field of order 4 and degree 2 If", "Returns the base ring of this Dirichlet character. EXAMPLES:: sage:", "factory function ``DirichletGroup``). The ``DirichletGroup`` factory ensures that either both", "x = tuple(map(R, x)) if R.is_exact() and any(u**v != 1", "vals return [D[i](vals[i]) for i in range(len(D))] def extend(self, M):", "orders of the elements in `x` are admissible (see :trac:`17283`)::", "G.zeta.is_in_cache(): x = -self.element() else: x = tuple(~z for z", "in the given base ring. EXAMPLES:: sage: t = trivial_character(7)", "(Dirichlet character modulo 5 of conductor 5 mapping 2 |-->", "7 with values in Rational Field sage: H = G.base_extend(CyclotomicField(6));", "2 of conductor 1, Dirichlet character modulo 9 of conductor", "True sage: G3 = DirichletGroup(31, CyclotomicField(3)) sage: G5 = DirichletGroup(31,", "4 sage: e.restrict(20) Dirichlet character modulo 20 of conductor 4", "not None: zeta = R(zeta) if isinstance(R, Map): R =", "is not None: zeta = R(zeta) if isinstance(R, Map): R", "sage.misc.functional import round from sage.misc.cachefunc import cached_method from sage.misc.fast_methods import", "= '_DirichletCharacter__element' element = None if element_key in state_dict: element", "list(G) [Dirichlet character modulo 20 of conductor 1 mapping 11", "base ring is an integral domain \"\"\" if v is", "Field of order 12 and degree 4 sage: e.minimize_base_ring().base_ring() Cyclotomic", "<NAME> (2006-08-06) \"\"\" d = rings.Integer(d) if d == 0:", "with complex Dirichlet coefficients sage: L.value(4) # abs tol 1e-14", "1,), (-zeta6 + 1,), -zeta6 - 2) ((-1,), (-1,), 1)", "base_ring=QQ); k (Rational Field, 2, None, None) sage: l =", "``lcalc`` program. INPUT: - ``prec`` -- precision (default 53) -", "Dirichlet character modulo 3 of conductor 3 mapping 2 |-->", "if p == 2 and F[0][1] > 2 and self.values_on_gens()[1].multiplicative_order()", "5 of conductor 5 mapping 2 |--> -zeta4] \"\"\" return", "by the following identity of power series (see for example", "zeta_order # (which may still be None). zeta_order = self._zeta_order", "and degree 4 sage: e.minimize_base_ring().base_ring() Cyclotomic Field of order 12", "create_object(self, version, key, **extra_args): \"\"\" Create the object from the", "Conductors that are divisible by various powers of 2 present", "specify it using ``zeta_order``:: sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6)) Traceback (most", "= -1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e =", "[0, 1, -zeta10^3, -zeta10, -zeta10, 1, zeta10^3 - zeta10^2 +", "field. prec = k+2 R = rings.PowerSeriesRing(rings.QQ, 't') t =", "sage: G = DirichletGroup(20) sage: G.1 Dirichlet character modulo 20", "Complex Field with 53 bits of precision \"\"\" if zeta", "the base ring of this Dirichlet character. EXAMPLES:: sage: G", "Z.rename('PARI L-function associated to %s' % self) return Z elif", "G([kronecker(u.lift(),d) for u in G.unit_gens()]) def is_DirichletCharacter(x): r\"\"\" Return True", "to # the other algorithm below. 
That said, I'm sure", "in self.gens(): ord *= int(g.order()) return ord def random_element(self): \"\"\"", "homomorphisms `(\\ZZ/N\\ZZ)^* \\to V` with pointwise multiplication. The group `V`", "except IndexError: # Done! return result_list value += val_on_gen[i] n", "Field of size 5 ] \"\"\" R = self.base_ring() return", "prec=53, a=1): r\"\"\" Return a Gauss sum associated to this", "m) z = zeta ** int(a*e + b*(e**(-1))) g +=", "modulo 5 of conductor 5 mapping 2 |--> 2,) TESTS:", "13 mapping 2 |--> zeta12 sage: e.galois_orbit() [Dirichlet character modulo", "category = Groups().Commutative() if base_ring.is_integral_domain() or base_ring.is_finite(): # The group", "redistribute it and/or modify # it under the terms of", "== 2: # 0 factors at 2. vals = [1]", "\"\"\" return self.order() def _repr_(self): \"\"\" Return a print representation", "prime powers exactly divide the modulus of this character. EXAMPLES::", "EXAMPLES:: sage: G.<e> = DirichletGroup(13) sage: G Group of Dirichlet", "'https://www.lmfdb.org/Character/Dirichlet/{}/{}' url = lmfdb_url.format(self.modulus(), self.conrey_number()) webbrowser.open(url) def galois_orbit(self, sort=True): r\"\"\"", "|--> 1 sage: a Dirichlet character modulo 20 of conductor", "p = next_prime(10^40) sage: g = DirichletGroup(19, GF(p)); g Group", "e Dirichlet character modulo 35 of conductor 35 mapping 22", "if base ring is an integral domain \"\"\" if v", "|--> -1, 17 |--> zeta4 Multiplying elements whose parents have", "G.<a,b> = DirichletGroup(20, CC) sage: a.is_primitive() False sage: b.is_primitive() False", "much quicker). CACHING: Computed Kloosterman sums are *not* cached with", "0, 1, 36, 0, 0, 36, 0, 1, 36, 0,", "|--> zeta4 We next compute several invariants of ``G``:: sage:", "seen_so_far: continue orbit = x.galois_orbit(sort=sort) if reps_only: G.append(x) else: G.append(orbit)", "sage: DirichletGroup(20,QQ).unit_gens() (11, 17) \"\"\" return self._integers.unit_gens() @cached_method def zeta(self):", "group. EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletGroup sage: is_DirichletGroup(DirichletGroup(11)) True", "in Number Field in a with defining polynomial x^4 +", "|--> 1, 41 |--> 1, 37 |--> zeta4) sage: val", "is None: zeta_order = zeta.multiplicative_order() elif zeta_order is not None:", "group of order 4 generated by zeta4 in Cyclotomic Field", "space of dimension 2 over Ring of integers modulo 2", "mult order of p modulo n. r = rings.IntegerModRing(n)(p).multiplicative_order() Auts", "r\"\"\" Return ``True`` if and only if `\\varepsilon(-1) = 1`.", "|--> zeta12^2 - 1 sage: e.order() 12 This illustrates a", "phi = K.complex_embedding(prec) CC = phi.codomain() else: raise NotImplementedError(\"Gauss sums", "sage: e(-1) -1.000000... sage: [e.is_even() for e in G] [True,", "try: return self.change_ring(K) except (TypeError, ValueError, ArithmeticError): return self def", "20 \"\"\" return self._integers __iter__ = multiplicative_iterator def list(self): \"\"\"", "-1 return val def __call__(self, m): \"\"\" Return the value", "1, # increase n accordingly, and increase value i =", "0, 1, 0, -1, 0, 1, 0, 0, 0, 1,", "will be raised. EXAMPLES:: sage: DirichletGroup(20).galois_orbits() [ [Dirichlet character modulo", "call last): ... IndexError: n(=2) must be between 0 and", "zeta = base_ring.zeta(zeta_order) return (base_ring, modulus, zeta, zeta_order) def create_object(self,", "q that identifies a Dirichlet character of modulus q. 
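An added sketch (assuming, as in the Conrey labelling, that index 1 always denotes the trivial character)::

    sage: G = DirichletGroup(8)
    sage: G[0].is_trivial()
    True
    sage: G[0].conrey_number()
    1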
See", "# especially since we end up computing all the Bernoulli", "zeta=zeta, zeta_order=zeta_order) def base_extend(self, R): \"\"\" Return the base extension", "def trivial_character(N, base_ring=rings.RationalField()): r\"\"\" Return the trivial character of the", "|--> -zeta4] \"\"\" return self._list_from_iterator() def modulus(self): \"\"\" Returns the", "zero:: sage: v=e.kloosterman_sum_numerical() sage: v.real() < 1.0e15 True sage: v.imag()", "Not to be called directly (use the factory function ``DirichletGroup``).", "the value `B_1 = -1/2` for the classical Bernoulli number.", "computed, and an error is raised if such ``zeta`` cannot", "category.Finite().FinitelyGenerated() Parent.__init__(self, base_ring, category=category) self._zeta = zeta self._zeta_order = zeta_order", "chi.values()[1:]: z *= zeta g += L(c)*z return g def", "5 sage: (a*b).conductor() 20 TESTS:: sage: G.<a, b> = DirichletGroup(20)", "4 mapping 31 |--> -1, 41 |--> 1, 37 |-->", "<NAME> (2005-09-02): Fixed bug in comparison of Dirichlet characters. It", "of conductor 1 \"\"\" s = 'Dirichlet character modulo %s", "sage: k == l True sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2,", "Dirichlet character mod 20, but with values in `\\QQ(\\zeta_n)`:: sage:", "the Galois orbits of Dirichlet characters in self, or in", "Unable to coerce zeta4 to a rational \"\"\" R =", "standard generators of `(\\ZZ/N\\ZZ)^*` as returned by :meth:`sage.rings.finite_rings.integer_mod_ring.IntegerModRing_generic.unit_gens`. - vector", "self.__bernoulli except AttributeError: self.__bernoulli = {} if k in self.__bernoulli:", "functions on ``ZZ``. EXAMPLES:: sage: e = DirichletGroup(16)([-1, 1]) sage:", "|--> -zeta4 \"\"\" return ~self def bernoulli(self, k, algorithm='recurrence', cache=True,", "r\"\"\" Returns the group of integers `\\ZZ/N\\ZZ` where `N` is", "range(p-1) for j in range(i, p-1)] sage: for s in", "= zeta.argument() v = [int(x.argument() / zeta_argument) for x in", "common multiple of `n` and the exponent of `(\\ZZ/N\\ZZ)^*`. EXAMPLES::", "ring admitting a *coercion* map from the base ring of", "random.randrange(g.order()) e *= g**n return e def unit_gens(self): r\"\"\" Returns", "sage: G.<a,b> = DirichletGroup(20, CC) sage: a.is_primitive() False sage: b.is_primitive()", "sage: H.<c> = DirichletGroup(4) sage: c.extend(20) Dirichlet character modulo 20", "= DirichletGroup(20).gen(1) sage: e.values() [0, 1, 0, -zeta4, 0, 0,", "the orders of the elements must divide the orders of", "EXAMPLES:: sage: G = DirichletGroup(13) sage: e = DirichletGroup(13).0 sage:", "is a primitive `m` th root of unity. This reduces", "6 and degree 2 Note that the root of unity", "b^2 Dirichlet character modulo 20 of conductor 5 mapping 11", "``'recurrence'`` (default) or ``'definition'`` - ``cache`` -- if True, cache", "import DirichletCharacter sage: M = FreeModule(Zmod(16), 3) sage: DirichletCharacter(G, M([4,", "... ValueError: base ring (= Ring of integers modulo 15)", "the character at -1 using knowledge of its order. This", ".. MATH:: \\sum_{a=1}^N \\frac{\\varepsilon(a) t e^{at}}{e^{Nt}-1} = sum_{k=0}^{\\infty} \\frac{B_{k,\\varepsilon}}{k!} t^k.", "primitive) character of modulus `N`. 
This function returns the generalized", "= DirichletGroup(5,CyclotomicField(4)).0 sage: e*f Dirichlet character modulo 5 of conductor", "9 sage: DirichletGroup(13) == DirichletGroup(13) True sage: DirichletGroup(13) == DirichletGroup(13,", "case, it also ensures that ``zeta`` is an element of", "of Dirichlet characters modulo 5 with values in Finite Field", "DP.0 sage: e.jacobi_sum(f) Traceback (most recent call last): ... NotImplementedError:", "modulus of `\\chi` and `\\zeta` is a primitive `m` th", "TESTS: This shows that :trac:`6393` has been fixed:: sage: G", "value `\\sqrt{p}`. CACHING: Computed Gauss sums are *not* cached with", "zip(x, orders)): raise ValueError(\"values (= {} modulo {}) must have", "known sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6) Group of Dirichlet characters", "False, True] sage: G = DirichletGroup(13) sage: e = G.0", "on the factor of p**(r-1) on the right hand side.", "use the following. Proposition: Suppose eps is a character mod", "DirichletGroup(35) sage: x = G.gens() sage: e = x[0]*x[1]^2; e", "g = 2 z = self.base_ring().zeta() n = z.multiplicative_order() m", "yet. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.element() (2, 0)", "Then `\\varepsilon(-1) = -1` if and only if `p =", "conductor 13 mapping 2 |--> zeta12 sage: e.galois_orbit() [Dirichlet character", "p == 0 or p.gcd(self._zeta_order) == 1: zeta_order = self._zeta_order", "We next compute several invariants of ``G``:: sage: G.gens() (Dirichlet", "sage: g = D(1) sage: g.jacobi_sum(g) 11 sage: sum([g(x)*g(1-x) for", "the exponent vector by 1, # increase n accordingly, and", "chi^2 Dirichlet character modulo 5 of conductor 5 mapping 2", "the same level! - <NAME> (2006-01-07): added more examples -", "sage: G = DirichletGroup(35) sage: x = G.gens() sage: e", "root of unity specified; use the same zeta_order # (which", "|--> 1, 17 |--> -1 sage: b.maximize_base_ring().base_ring() Cyclotomic Field of", "1 \"\"\" n = int(n) g = self.gens() if n<0", "that :trac:`19060` is fixed:: sage: K.<z> = CyclotomicField(8) sage: G", "0, 1, 0, 1] sage: e = DirichletGroup(20).gen(0) sage: e.values()", "that the # divisibility holds equals Valuation(Order(x),p)+1. cond = p**(valuation(self.order(),p)", "place. :: sage: G(d[0])*G(d[1]) == c True Conductors that are", "prime power modulus, where the prime powers exactly divide the", "EXAMPLES:: sage: from sage.modular.dirichlet import is_DirichletCharacter sage: is_DirichletCharacter(trivial_character(3)) True sage:", "\\ZZ / N\\ZZ} \\chi(a) \\psi(1-a) where `\\chi` and `\\psi` are", "l True sage: DirichletGroup(2, base_ring=QQ) is DirichletGroup(2, base_ring=CC) False If", "mapping 22 |--> zeta12^3, 31 |--> zeta12^2 sage: e.order() 12", "is cyclic and a generator for `V` can be found.", "set the cache of element() from that if we encounter", "mapping 2 |--> 2,) TESTS: Dirichlet groups are cached, creating", "`m` th root of unity. This reduces to the Gauss", "sage: DirichletGroup(20).zeta() zeta4 sage: DirichletGroup(60).zeta() zeta4 sage: DirichletGroup(60,QQ).zeta() -1 sage:", "determines a Dirichlet group. TESTS:: sage: DirichletGroup.create_key(60) (Cyclotomic Field of", "super(DirichletGroup_class, self).__setstate__(state) @property def _module(self): \"\"\" Return the free module", "since we end up computing all the Bernoulli # numbers", "parents have different zeta orders works:: sage: a = DirichletGroup(3,", "precision. 
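An added numerical sketch: at 100 bits of precision, `|g(\chi)|^2` for the quadratic character modulo 3 agrees with 3 to far better than `10^{-20}`::

    sage: g = DirichletGroup(3).0.gauss_sum_numerical(prec=100)
    sage: abs(abs(g)^2 - 3) < 1e-20
    True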
INPUT: - ``prec`` -- integer (default: 53), *bits* of", "EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.kernel() [1, 9, 13,", "= DirichletGroup(3, QQ, zeta=1, zeta_order=1)(1) sage: b = DirichletGroup(3, QQ,", "bug (see :trac:`18086`):: sage: K.<a,b>=NumberField([x^2 + 1, x^2 - 3])", "can change:: sage: H.zeta() zeta6 This method (in contrast to", "EXAMPLES:: sage: len(DirichletGroup(20)) 8 sage: len(DirichletGroup(20, QQ)) 4 sage: len(DirichletGroup(20,", "= DirichletGroup(19, GF(5)) sage: loads(G.dumps()) == G True We compute", "x in values_on_gens] else: dlog = P._zeta_dlog v = [dlog[x]", "in \" % self.modulus() if self._zeta is not None: s", "returns `B_{1,\\varepsilon} = 1/2`, in accordance with the above definition,", "character. EXAMPLES:: sage: G.<a,b> = DirichletGroup(20) sage: a.conductor() 4 sage:", "an integral domain if only zeta_order is specified\" % base_ring)", "*= e.values_on_gens()[0] # first gen is -1 for 2-power modulus", "`(\\ZZ/N\\ZZ)^*`. Many operations, such as finding a set of generators", "base_ring.is_integral_domain() or base_ring.is_finite(): # The group of n-th roots of", "if d <= 0: raise ValueError(\"d must be positive\") G", "53), *bits* of precision - ``a`` -- integer, as for", "chi4._pari_conversion() ([[4, [0]], [2, [2], [3]], [[2]~, Vecsmall([2])], [[4], [[1,", "the value of this character at the integer `m`. ..", "Group of Dirichlet characters modulo 5 with values in Complex", "DirichletGroup(20) sage: G.ngens() 2 \"\"\" return len(self.gens()) @cached_method def order(self):", "return free_module.FreeModule(rings.IntegerModRing(self.zeta_order()), len(self.unit_gens())) @property def _zeta_powers(self): \"\"\" Return a list", "chi = DirichletGroup(24).0 sage: chi._repr_short_() '[-1, 1, 1]' \"\"\" return", "is better since it computes the same thing, but requires", "2 We can't multiply directly, since coercion of one element", "domain \"\"\" if v is None: v = self.list() else:", "recomputed as the order # of R(zeta) by the DirichletGroup", "# https://www.gnu.org/licenses/ # **************************************************************************** from __future__ import print_function import sage.categories.all", "is pretty fast, at least compared to # the other", "cyclotomic field by its rings of integers as the base", "17 |--> 1 sage: b Dirichlet character modulo 20 of", "2 of conductor 1 \"\"\" s = 'Dirichlet character modulo", "[R.one()] * len(self.unit_gens()) except (TypeError, ValueError, ArithmeticError): pass if isinstance(x,", "mapping 7 |--> 1, 13 |--> -1, 17 |--> -1", "Jacobi sum associated to these Dirichlet characters (i.e., J(self,char)). 
This", "universe = cat.Objects()) def exponent(self): \"\"\" Return the exponent of", "m in Auts] if sort: v.sort() return v def gauss_sum(self,", "values_on_gens_key = '_DirichletCharacter__values_on_gens' values_on_gens = None state_dict = state[1] if", "e.kloosterman_sum(3,5) -2*zeta6 + 1 sage: G = DirichletGroup(20) sage: e", "G([1 for u in G.unit_gens()]) sage: e.kloosterman_sum(7,17) -2*E(5) - 4*E(5)^2", "-1 :: sage: a = kronecker_character(1) sage: b = DirichletGroup(2401,QQ)(a)", "= QuadraticField(-1) sage: chi = DirichletGroup(5, K)[1] sage: chi(2) i", "0, 0; 0, 1, 0; 0, 0, 1], [7, 13,", "4 mapping 7 |--> -1, 5 |--> 1, Dirichlet character", "e = DirichletGroup(100).0 sage: e.modulus() 100 sage: e.conductor() 4 sage:", "field, QQ, QQbar, or a complex field\") zeta = CC.zeta(G.modulus())", "matrix(0,2)]], Mat(1), [3], [2], [0]], Mat(1)], [1]) sage: chi =", "sage.structure.richcmp import richcmp from sage.arith.all import (binomial, bernoulli, kronecker, factor,", "to do this, since e.g., unit gens mod 11 are", "richcmp(self.values_on_gens(), other.values_on_gens(), op) def __hash__(self): \"\"\" Return the hash of", "Return ``True`` if and only if `\\varepsilon(-1) = -1`. EXAMPLES::", "= DirichletGroup(5).0 sage: e Dirichlet character modulo 5 of conductor", "-1`. EXAMPLES:: sage: G = DirichletGroup(13) sage: e = G.0", "-zeta12^2 + 1] A non-example:: sage: chi = DirichletGroup(7, Integers(9),", "str(self.values_on_gens()[i]) return s def _latex_(self): r\"\"\" LaTeX representation of self.", "elif mod == 2: return [R.zero(), R.one()] result_list = [R.zero()]", "doctest Dirichlet character modulo 20 of conductor 20 mapping 11", "root of unity in the base ring. EXAMPLES:: sage: DirichletGroup(37).zeta()", "modulo 1 of conductor 1 sage: DirichletGroup(2)[0] Dirichlet character modulo", "chi4 = DirichletGroup(4).gen() sage: chi4._pari_conversion() ([[4, [0]], [2, [2], [3]],", "not an integral domain, an error will be raised if", "Cyclotomic Field of order 12 and degree 4 sage: (e^2).minimize_base_ring().base_ring()", "if self.base_ring() is R: return self G = self.parent().change_ring(R) return", "// m) for c in m.coprime_integers(m): e = rings.Mod(c, m)", "was explicitly given; we use it over the # new", "zeta_order=6) Group of Dirichlet characters modulo 7 with values in", "An example where we give ``zeta``, but not its order::", "`x` are admissible (see :trac:`17283`):: sage: k.<i> = CyclotomicField(4) sage:", "the modulus(=%s)\"%(M,self.modulus())) H = DirichletGroup(M, self.base_ring()) return H(self) def _pari_conversion(self):", "+ 2*zeta20^4 + 4 TESTS:: sage: G = DirichletGroup(20, UniversalCyclotomicField())", "TESTS: Test that :trac:`11783` and :trac:`14368` are fixed:: sage: chi", "- 1.7320508075...*I sage: e.gauss_sum_numerical(a=2, prec=100) 4.7331654313260708324703713917e-30 - 1.7320508075688772935274463415*I sage: G", "`\\chi` and `\\psi` are both characters modulo `N`. EXAMPLES:: sage:", "modulo 9 sage: DirichletGroup(13) == DirichletGroup(13) True sage: DirichletGroup(13) ==", "*= int(g.order()) return ord def random_element(self): \"\"\" Return a random", "unit generator return self.element_class(self, x) elif not isinstance(x, DirichletCharacter): raise", "G = self.parent() if G.zeta.is_in_cache(): x = n * self.element()", "OUTPUT: Let `\\varepsilon` be a (not necessarily primitive) character of", "modulo 6 of conductor 3 mapping 5 |--> -1 sage:", "character modulo 31 of conductor 31 mapping 3 |--> -zeta30^7", "the conductor of this character. 
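An added sketch of the basic divisibility: the conductor always divides the modulus, with equality exactly when the character is primitive::

    sage: e = DirichletGroup(100).0
    sage: e.conductor()
    4
    sage: e.conductor().divides(e.modulus())
    True
    sage: e.is_primitive()
    False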
Conductor and `L`-function
--------------------------

``conductor()`` computes and returns the conductor of this character, i.e.
the smallest modulus from which it is induced. ``primitive_character()``
returns the associated primitive character, while ``restrict(M)`` and
``extend(M)`` move to a divisor, respectively a multiple, of the modulus;
in both cases the conductor must divide ``M``.

EXAMPLES::

    sage: G.<a,b> = DirichletGroup(20)
    sage: a.conductor()
    4
    sage: e = DirichletGroup(100).0
    sage: e.modulus()
    100
    sage: e.conductor()
    4
    sage: f = e.primitive_character(); f
    Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1
    sage: e.restrict(50)
    Traceback (most recent call last):
    ...
    ValueError: conductor(=4) must divide M(=50)

``lfunction(prec=53, algorithm='pari')`` returns the `L`-function of
``self``. The result is a wrapper around a PARI `L`-function;
``algorithm='lcalc'`` is also accepted.
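For example, the `L`-function attached to the character ``a`` above can be
evaluated directly::

    sage: G.<a,b> = DirichletGroup(20)
    sage: L = a.lfunction(); L
    PARI L-function associated to Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1
    sage: L(4)
    0.988944551741105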
Galois action and predicates
----------------------------

``galois_orbit(sort=True)`` returns the orbit of this character under the
action of the absolute Galois group of the prime subfield of ``Frac(R)``,
where ``R`` is the base ring; Galois orbits are only defined if ``R`` is
an integral domain. The group-level method ``galois_orbits(v=None,
reps_only=False, sort=True, check=True)`` partitions a list of characters
into orbits, and with ``reps_only=True`` returns only representatives.

``is_trivial()`` returns ``True`` if this is the trivial character, i.e.,
has order 1; ``is_primitive()`` returns ``True`` if and only if the
conductor equals the modulus; ``is_even()`` and ``is_odd()`` test whether
`\varepsilon(-1) = 1` or `\varepsilon(-1) = -1`, respectively.

EXAMPLES::

    sage: G = DirichletGroup(13)
    sage: e = G.0
    sage: e.is_odd()
    True
    sage: e.galois_orbit()
    [Dirichlet character modulo 13 of conductor 13 mapping 2 |--> zeta12,
     ...]
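A small consistency check tying these predicates together; the assertions
are illustrative additions, not doctests taken from the source::

    sage: e = DirichletGroup(100).0
    sage: e.is_primitive()
    False
    sage: f = e.primitive_character()
    sage: f.is_primitive() and f.conductor() == e.conductor()
    True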
Conrey numbers
--------------

``conrey_number()`` returns the Conrey number for this character: a
positive integer coprime to `q` that identifies a Dirichlet character of
modulus `q`. See https://www.lmfdb.org/knowledge/show/character.dirichlet.conrey.
``lmfdb_page()`` opens the LMFDB web page of the character, at
``https://www.lmfdb.org/Character/Dirichlet/{modulus}/{conrey_number}``.

EXAMPLES::

    sage: chi4 = DirichletGroup(4).gen()
    sage: chi4.conrey_number()
    3
    sage: chi = DirichletGroup(60)([1,-1,I])
    sage: chi.conrey_number()
    17
    sage: chi = DirichletGroup(420)([1,-1,-I,1])
    sage: chi.conrey_number()
    113

Generalized Bernoulli numbers
-----------------------------

``bernoulli(k, algorithm='recurrence', cache=True, **opts)`` returns the
generalized Bernoulli number `B_{k,\varepsilon}`, defined by the power
series (see for example [DI1995]_, Section 2.2)

.. MATH::

    \sum_{a=1}^N \frac{\varepsilon(a)\, t\, e^{at}}{e^{Nt}-1}
    = \sum_{k=0}^{\infty} \frac{B_{k,\varepsilon}}{k!} t^k,

where `N` is the modulus of `\varepsilon`. The ``'recurrence'`` algorithm
is usually optimal; the ``'definition'`` algorithm works directly from the
power series above. For the trivial character this returns
`B_{1,\varepsilon} = 1/2`, in accordance with the above definition, but in
contrast to the classical value `B_1 = -1/2` (some authors use an
alternative definition giving `B_{1,\varepsilon} = -1/2`).

EXAMPLES::

    sage: DirichletGroup(1)[0].bernoulli(1)
    1/2
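The two algorithms must agree; a quick cross-check (passing ``cache=False``
on the second call is an illustrative detail, so that the comparison is
not short-circuited by the cache)::

    sage: eps = DirichletGroup(9).0
    sage: eps.bernoulli(3) == eps.bernoulli(3, algorithm="definition", cache=False)
    True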
Equality, conversion and internal representation
------------------------------------------------

Operations that combine two characters, such as ``jacobi_sum``, require
both arguments to lie in the same Dirichlet group; otherwise
``NotImplementedError: Characters must be from the same Dirichlet Group.``
is raised. There are conversions between Dirichlet groups of different
moduli, but no coercion. This implies that characters of different moduli
compare as unequal, even if they define identical functions on ``ZZ``::

    sage: trivial_character(6) == trivial_character(3)
    False
    sage: G = DirichletGroup(6)
    sage: G(DirichletGroup(3).0)
    Dirichlet character modulo 6 of conductor 3 mapping 5 |--> -1

Internally, a character is stored by its values on the standard generators
of `(\ZZ/N\ZZ)^*` (``values_on_gens()``), or equivalently by ``element()``,
an exponent vector in a free module over ``Zmod(zeta_order)``::

    sage: e = DirichletGroup(16)([-1, 1])
    sage: e.values_on_gens()
    (-1, 1)
    sage: G.<a,b> = DirichletGroup(20)
    sage: a.element()
    (2, 0)
    sage: b.element()
    (0, 1)

.. NOTE:: The constructor of :class:`DirichletCharacter` sets the cache of
   ``element()`` or of ``values_on_gens()``. A table of values of the
   character is made the first time ``values()`` is called.
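Conversion also underlies ``extend()`` and ``restrict()``. Extending the
primitive character modulo 4 back to modulus 20 recovers ``a``; the second
check below is an illustrative addition rather than a doctest from the
source::

    sage: G.<a,b> = DirichletGroup(20)
    sage: H.<c> = DirichletGroup(4)
    sage: c.extend(20) == a
    True
    sage: a.restrict(4) == c
    True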
Generators and decomposition
----------------------------

``gens()`` returns the canonical generators of the group, one for each
unit generator of `(\ZZ/N\ZZ)^*`; ``gen(n)`` (raising an ``IndexError``
for ``n`` out of range), ``ngens()`` and ``order()`` behave as expected.

EXAMPLES::

    sage: G = DirichletGroup(20)
    sage: G.gens()
    (Dirichlet character modulo 20 of conductor 4 mapping 11 |--> -1, 17 |--> 1,
     Dirichlet character modulo 20 of conductor 5 mapping 11 |--> 1, 17 |--> zeta4)
    sage: G.ngens()
    2
    sage: G.order()
    8

``decomposition()`` writes a character modulo `N` as a product of
characters modulo the prime power factors of `N`::

    sage: G.<a,b> = DirichletGroup(20)
    sage: c = a*b
    sage: d = c.decomposition(); d
    [Dirichlet character modulo 4 of conductor 4 mapping 3 |--> -1,
     Dirichlet character modulo 5 of conductor 5 mapping 2 |--> zeta4]

The factors live in different parents, so we can't multiply them directly:
coercion of one element into the other parent fails in both cases. We can
multiply if we're explicit about where we want the multiplication to take
place::

    sage: G(d[0])*G(d[1]) == c
    True
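Besides the group machinery there are helper constructors,
``trivial_character(N, base_ring=QQ)`` and the quadratic
``kronecker_character(d)`` / ``kronecker_character_upside_down(d)``. A
small illustration (the evaluation points are arbitrary choices)::

    sage: t = trivial_character(7)
    sage: [t(x) for x in [6, 7, 8]]
    [1, 0, 1]
    sage: chi = kronecker_character(5)
    sage: chi(2) == kronecker(5, 2)
    True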
The group and its factory
-------------------------

``DirichletGroup_class`` represents the group of Dirichlet characters
modulo `N` with values in a ring `R`. This is the group of homomorphisms
`(\ZZ/N\ZZ)^* \to V` with pointwise multiplication, where `V` is a group
of roots of unity inside the multiplicative group `R^*` of ``base_ring``.
When the base ring is an integral domain or finite, the group of roots of
unity is finite, so the character group is finite too; in particular it is
finitely generated and has a distinguished set of generators. Groups are
created through the ``DirichletGroup`` factory (a ``UniqueFactory``), so
the group is cached and creating two groups with the same parameters
yields the same object::

    sage: G = DirichletGroup(9)
    sage: loads(dumps(G)) is G
    True
Choosing the value ring
-----------------------

The optional arguments ``zeta`` and ``zeta_order`` of ``DirichletGroup``
control the group `V` in which the characters take values (both must be
``None`` if ``base_ring`` is not specified):

- If both are omitted, `V` is taken to be `R^*`, or equivalently its
  `n`-torsion subgroup, where `n` is the exponent of `(\ZZ/N\ZZ)^*`; by
  default the base ring is the cyclotomic field `\QQ(\zeta_n)`.
- If ``zeta`` is given, then ``zeta_order``, when supplied, must be its
  multiplicative order; passing it avoids computing the order, which is
  useful if the base ring is not exact or if the order of ``zeta`` is
  very large.
- If ``zeta`` is not specified but ``zeta_order`` is, the base ring must
  be an integral domain (otherwise a ``ValueError`` is raised); `V` is
  taken to be the group of roots of unity of order ``zeta_order``, and a
  generator ``zeta`` of `V` is found automatically.
- ``integral=True`` replaces the default cyclotomic field by its ring of
  integers as the base ring.

EXAMPLES::

    sage: DirichletGroup(7, CC, zeta=exp(2*pi*I/6), zeta_order=6)
    Group of Dirichlet characters modulo 7 with values in the group of order 6 generated by 0.500000000000000 + 0.866025403784439*I in Complex Field with 53 bits of precision
    sage: DirichletGroup(60, integral=True)
    Group of Dirichlet characters modulo 60 with values in Gaussian Integers in Cyclotomic Field of order 4 and degree 2
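If the base ring is not an integral domain, specifying only ``zeta_order``
fails, but a group can still be built around an explicitly given ``zeta``::

    sage: G = DirichletGroup(17, Integers(15), zeta=7); G
    Group of Dirichlet characters modulo 17 with values in the group of order 4 generated by 7 in Ring of integers modulo 15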
[ "write(self, file): \"\"\" Write the contents of this :class:`File` object", "Base class for all line based text files. When reading", "name if isinstance(file, str): with open(file, \"r\") as f: while", "file (or file-like object). Parameters ---------- file_name : file-like object", "isinstance(file, str): with open(file, \"r\") as f: while True: line", "f.write(\"\\n\".join(self.lines) + \"\\n\") else: if not is_text(file): raise TypeError(\"A file", "file_object.lines = lines return file_object @staticmethod def read_iter(file): \"\"\" Create", "subclass representing the parsed file. \"\"\" pass def _deprecated_read(self, file,", "subsequent # 'read()' calls are delegated to the instance method", "for all line based text files. When reading a file,", "disk (or a file-like object from other sources). In order", "e.g. 'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.TextIOBase): return True", "lines.append(text[i : i+width]) return lines def is_binary(file): if isinstance(file, io.BufferedIOBase):", "List of string representing the lines in the text file.", "\"r\") as f: lines = f.read().splitlines() # File object else:", "that can be filled with data using the class specific", "data from the newly created :class:`File` object \"\"\" warnings.warn( \"Instance", "str): with open(file, \"r\") as f: while True: line =", "does not contain the required data or because the file", "required data or because the file is malformed. \"\"\" pass", "modify from outside. \"\"\" def __init__(self): super().__init__() self.lines = []", "the data from the newly created :class:`File` object \"\"\" warnings.warn(", "parsed file. \"\"\" pass def _deprecated_read(self, file, *args, **kwargs): \"\"\"", "__init__(self): # Support for deprecated instance method 'read()': # When", "Parameters ---------- file_name : file-like object or str The file", "+ \"\\n\") else: if not is_text(file): raise TypeError(\"A file opened", "as f: while True: line = f.readline() if not line:", "clone.lines = copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\" Indicates", "the file does not contain the required data or because", "etc. \"\"\" lines = [] for i in range(0, len(text),", "an instance, the 'read()' class method is # replaced by", "replaces the data in `self` with the data from the", "the text file. PROTECTED: Do not modify from outside. \"\"\"", "line def write(self, file): \"\"\" Write the contents of this", "object). Parameters ---------- file : file-like object or str The", "with the data from the newly created :class:`File` object \"\"\"", "function simply wraps the given `text` after `width` characters, ignoring", "\"\"\" Base class for all file classes. The constructor creates", "yield line def write(self, file): \"\"\" Write the contents of", "class specific setter methods. Conversely, the class method :func:`read()` reads", "file to be read. Alternatively a file path can be", "= file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines = lines return", "\"use class method instead\", DeprecationWarning ) cls = type(self) new_file", "method :func:`read()`. Internally this calls the :func:`read()` class method and", "break yield line # File object else: if not is_text(file):", "given `text` after `width` characters, ignoring sentences, whitespaces, etc. \"\"\"", "file_object : File An instance from the respective :class:`File` subclass", "with data using the class specific setter methods. 
Conversely, the", "Conversely, the class method :func:`read()` reads a file from disk", "file. Parameters ---------- file : file-like object or str The", "read(cls, file, *args, **kwargs): # File name if isinstance(file, str):", "# replaced by the instance method, so that subsequent #", "open(file, \"r\") as f: lines = f.read().splitlines() # File object", "lines = [] for i in range(0, len(text), width): lines.append(text[i", "the 3-Clause BSD License. Please see 'LICENSE.rst' for further #", "object else: if not is_text(file): raise TypeError(\"A file opened in", "Support for deprecated instance method 'read()': # When creating an", "delegated to the instance method self.read = self._deprecated_read @classmethod @abc.abstractmethod", "object). Parameters ---------- file_name : file-like object or str The", "write the instance content into a file the :func:`write()` method", "= [] @classmethod def read(cls, file, *args, **kwargs): # File", "over each line of the given text file. Parameters ----------", "open(file, \"w\") as f: f.write(\"\\n\".join(self.lines) + \"\\n\") else: if not", "one for each line. When writing a file, this list", "Indicates that the file is not suitable for the requested", "read. Alternatively a file path can be supplied. Returns -------", "the parsed file. \"\"\" pass def _deprecated_read(self, file, *args, **kwargs):", "is written into the file. Attributes ---------- lines : list", "file classes. The constructor creates an empty file, that can", "newly created :class:`File` object \"\"\" warnings.warn( \"Instance method 'read()' is", "Parameters ---------- file : file-like object or str The file", "file. Parameters ---------- file_name : file-like object or str The", "into a file (or file-like object). Parameters ---------- file_name :", "TypeError(\"A file opened in 'text' mode is required\") while True:", "class for all line based text files. When reading a", "= copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\" Indicates that", "__all__ = [\"File\", \"TextFile\", \"InvalidFileError\"] import abc import io import", "f: lines = f.read().splitlines() # File object else: if not", "required\") while True: line = file.readline() if not line: break", "width): lines.append(text[i : i+width]) return lines def is_binary(file): if isinstance(file,", "if isinstance(file, io.TextIOBase): return True # for file wrappers, e.g.", "written into the file. Attributes ---------- lines : list List", "\"\\n\") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self):", "When writing a file, this list is written into the", "= [\"File\", \"TextFile\", \"InvalidFileError\"] import abc import io import warnings", "the file. Attributes ---------- lines : list List of string", "a file from disk (or a file-like object from other", "InvalidFileError(Exception): \"\"\" Indicates that the file is not suitable for", "= cls(*args, **kwargs) file_object.lines = lines return file_object @staticmethod def", "in the file. \"\"\" # File name if isinstance(file, str):", "method self.read = self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file): \"\"\"", "File An instance from the respective :class:`File` subclass representing the", "be supplied. \"\"\" pass class TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base class", "method 'read()' is deprecated, \" \"use class method instead\", DeprecationWarning", "given text file. 
Parameters ---------- file : file-like object or", "read. Alternatively a file path can be supplied. Yields ------", "data or because the file is malformed. \"\"\" pass def", "if isinstance(file, str): with open(file, \"w\") as f: f.write(\"\\n\".join(self.lines) +", "while True: line = f.readline() if not line: break yield", "= cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): \"\"\"", "text file. PROTECTED: Do not modify from outside. \"\"\" def", "wrappers, e.g. 'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.TextIOBase): return", "the lines in the text file. PROTECTED: Do not modify", "'text' mode is required\") while True: line = file.readline() if", "the requested action, either because the file does not contain", "self.lines = [] @classmethod def read(cls, file, *args, **kwargs): #", "can be supplied. Yields ------ line : str The current", "import abc import io import warnings from .copyable import Copyable", "setter methods. Conversely, the class method :func:`read()` reads a file", "metaclass=abc.ABCMeta): \"\"\" Base class for all file classes. The constructor", "is_text(file): raise TypeError(\"A file opened in 'text' mode is required\")", "if not is_text(file): raise TypeError(\"A file opened in 'text' mode", ": i+width]) return lines def is_binary(file): if isinstance(file, io.BufferedIOBase): return", "(or file-like object). Parameters ---------- file_name : file-like object or", "classes. The constructor creates an empty file, that can be", "def _deprecated_read(self, file, *args, **kwargs): \"\"\" Support for deprecated instance", "the instance method self.read = self._deprecated_read @classmethod @abc.abstractmethod def read(cls,", "object into a file. Parameters ---------- file_name : file-like object", "str): with open(file, \"r\") as f: lines = f.read().splitlines() #", "line: break yield line # File object else: if not", "@classmethod @abc.abstractmethod def read(cls, file): \"\"\" Parse a file (or", "Attributes ---------- lines : list List of string representing the", "or str The file to be read. Alternatively a file", "i+width]) return lines def is_binary(file): if isinstance(file, io.BufferedIOBase): return True", "is not suitable for the requested action, either because the", "self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): \"\"\" Write the contents of", "3-Clause BSD License. Please see 'LICENSE.rst' for further # information.", "suitable for the requested action, either because the file does", "written to. Alternatively a file path can be supplied. \"\"\"", "supplied. \"\"\" pass class TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base class for", "wrappers, e.g. 'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.BufferedIOBase): return", "ignoring sentences, whitespaces, etc. \"\"\" lines = [] for i", "with open(file, \"r\") as f: while True: line = f.readline()", "Returns ------- file_object : File An instance from the respective", "isinstance(file, str): with open(file, \"r\") as f: lines = f.read().splitlines()", "warnings from .copyable import Copyable import copy class File(Copyable, metaclass=abc.ABCMeta):", "hence much more efficient version of `textwrap.wrap()`. This function simply", "method 'read()': # When creating an instance, the 'read()' class", "methods. 
Conversely, the class method :func:`read()` reads a file from", "lines return file_object @staticmethod def read_iter(file): \"\"\" Create an iterator", "\"Instance method 'read()' is deprecated, \" \"use class method instead\",", "this list is written into the file. Attributes ---------- lines", "def read(cls, file, *args, **kwargs): # File name if isinstance(file,", ":func:`read()`. Internally this calls the :func:`read()` class method and replaces", "'read()': # When creating an instance, the 'read()' class method", "file.write(\"\\n\".join(self.lines) + \"\\n\") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines)", "characters, ignoring sentences, whitespaces, etc. \"\"\" lines = [] for", "raise TypeError(\"A file opened in 'text' mode is required\") lines", "file, the text content is saved as list of strings,", "in `self` with the data from the newly created :class:`File`", "if isinstance(file, str): with open(file, \"r\") as f: while True:", "return lines def is_binary(file): if isinstance(file, io.BufferedIOBase): return True #", "respective :class:`File` subclass representing the parsed file. \"\"\" pass def", "and is distributed # under the 3-Clause BSD License. Please", "filled with data using the class specific setter methods. Conversely,", "_deprecated_read(self, file, *args, **kwargs): \"\"\" Support for deprecated instance method", "The file to be written to. Alternatively a file path", "file, this list is written into the file. Attributes ----------", "import io import warnings from .copyable import Copyable import copy", "Base class for all file classes. The constructor creates an", "(or a file-like object from other sources). In order to", "for the requested action, either because the file does not", "using the class specific setter methods. Conversely, the class method", "based text files. When reading a file, the text content", "def __str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\" Indicates that the file", "True else: return False def is_text(file): if isinstance(file, io.TextIOBase): return", "in 'text' mode is required\") lines = file.read().splitlines() file_object =", "else: return False def is_text(file): if isinstance(file, io.TextIOBase): return True", "pass def _deprecated_read(self, file, *args, **kwargs): \"\"\" Support for deprecated", "\"biotite\" __author__ = \"<NAME>\" __all__ = [\"File\", \"TextFile\", \"InvalidFileError\"] import", "if not line: break yield line # File object else:", "Alternatively a file path can be supplied. \"\"\" if isinstance(file,", "Copyable import copy class File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base class for", "representing the parsed file. \"\"\" pass def _deprecated_read(self, file, *args,", "------ line : str The current line in the file.", "or because the file is malformed. \"\"\" pass def wrap_string(text,", "Create an iterator over each line of the given text", "required\") file.write(\"\\n\".join(self.lines) + \"\\n\") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines =", "hasattr(file, \"file\") and isinstance(file.file, io.BufferedIOBase): return True else: return False", "file, that can be filled with data using the class", "return False def is_text(file): if isinstance(file, io.TextIOBase): return True #", "from other sources). In order to write the instance content", "strings, one for each line. 
When writing a file, this", "is # replaced by the instance method, so that subsequent", "to write the instance content into a file the :func:`write()`", "the instance content into a file the :func:`write()` method is", "file_name : file-like object or str The file to be", "def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines))", "<reponame>danijoo/biotite # This source code is part of the Biotite", "and replaces the data in `self` with the data from", "elif hasattr(file, \"file\") and isinstance(file.file, io.BufferedIOBase): return True else: return", "string representing the lines in the text file. PROTECTED: Do", "current line in the file. \"\"\" # File name if", "list is written into the file. Attributes ---------- lines :", "a file, the text content is saved as list of", "class TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base class for all line based", "isinstance(file, io.TextIOBase): return True # for file wrappers, e.g. 'TemporaryFile'", "while True: line = file.readline() if not line: break yield", "reads a file from disk (or a file-like object from", "because the file is malformed. \"\"\" pass def wrap_string(text, width):", "__str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\" Indicates that the file is", "object or str The file to be written to. Alternatively", "opened in 'text' mode is required\") lines = file.read().splitlines() file_object", "file is malformed. \"\"\" pass def wrap_string(text, width): \"\"\" A", "so that subsequent # 'read()' calls are delegated to the", "f: f.write(\"\\n\".join(self.lines) + \"\\n\") else: if not is_text(file): raise TypeError(\"A", "Alternatively a file path can be supplied. \"\"\" pass class", "line: break yield line def write(self, file): \"\"\" Write the", "'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.BufferedIOBase): return True else:", "class method instead\", DeprecationWarning ) cls = type(self) new_file =", "deprecated instance method :func:`read()`. Internally this calls the :func:`read()` class", "efficient version of `textwrap.wrap()`. This function simply wraps the given", "'LICENSE.rst' for further # information. __name__ = \"biotite\" __author__ =", "for further # information. __name__ = \"biotite\" __author__ = \"<NAME>\"", "data using the class specific setter methods. Conversely, the class", "file path can be supplied. Returns ------- file_object : File", "new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file):", "\"\"\" warnings.warn( \"Instance method 'read()' is deprecated, \" \"use class", "if isinstance(file, io.BufferedIOBase): return True # for file wrappers, e.g.", "file-like object from other sources). In order to write the", "Please see 'LICENSE.rst' for further # information. __name__ = \"biotite\"", ":class:`File` object \"\"\" warnings.warn( \"Instance method 'read()' is deprecated, \"", "is malformed. \"\"\" pass def wrap_string(text, width): \"\"\" A much", "action, either because the file does not contain the required", "of `textwrap.wrap()`. This function simply wraps the given `text` after", "in 'text' mode is required\") file.write(\"\\n\".join(self.lines) + \"\\n\") def __copy_fill__(self,", "(or file-like object). Parameters ---------- file : file-like object or", "a file (or file-like object). Parameters ---------- file : file-like", "further # information. 
__name__ = \"biotite\" __author__ = \"<NAME>\" __all__", "raise TypeError(\"A file opened in 'text' mode is required\") file.write(\"\\n\".join(self.lines)", ":func:`read()` reads a file from disk (or a file-like object", "source code is part of the Biotite package and is", ": file-like object or str The file to be written", "isinstance(file, io.BufferedIOBase): return True # for file wrappers, e.g. 'TemporaryFile'", "simpler and hence much more efficient version of `textwrap.wrap()`. This", "super().__init__() self.lines = [] @classmethod def read(cls, file, *args, **kwargs):", "file does not contain the required data or because the", "file-like object). Parameters ---------- file_name : file-like object or str", "\"\"\" Create an iterator over each line of the given", "of the given text file. Parameters ---------- file : file-like", "object \"\"\" warnings.warn( \"Instance method 'read()' is deprecated, \" \"use", "iterator over each line of the given text file. Parameters", "deprecated, \" \"use class method instead\", DeprecationWarning ) cls =", "\"w\") as f: f.write(\"\\n\".join(self.lines) + \"\\n\") else: if not is_text(file):", "file (or file-like object). Parameters ---------- file : file-like object", "mode is required\") lines = file.read().splitlines() file_object = cls(*args, **kwargs)", "License. Please see 'LICENSE.rst' for further # information. __name__ =", "the file is malformed. \"\"\" pass def wrap_string(text, width): \"\"\"", "hasattr(file, \"file\") and isinstance(file.file, io.TextIOBase): return True else: return False", "\"\"\" Parse a file (or file-like object). Parameters ---------- file", "This source code is part of the Biotite package and", "\"InvalidFileError\"] import abc import io import warnings from .copyable import", "@staticmethod def read_iter(file): \"\"\" Create an iterator over each line", "is required\") lines = file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines", "under the 3-Clause BSD License. Please see 'LICENSE.rst' for further", "def write(self, file): \"\"\" Write the contents of this :class:`File`", ".copyable import Copyable import copy class File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base", "created :class:`File` object \"\"\" warnings.warn( \"Instance method 'read()' is deprecated,", "content is saved as list of strings, one for each", "\"<NAME>\" __all__ = [\"File\", \"TextFile\", \"InvalidFileError\"] import abc import io", "that the file is not suitable for the requested action,", "sources). In order to write the instance content into a", "opened in 'text' mode is required\") file.write(\"\\n\".join(self.lines) + \"\\n\") def", "**kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): \"\"\" Write the contents", "deprecated instance method 'read()': # When creating an instance, the", "file : file-like object or str The file to be", "can be filled with data using the class specific setter", "Yields ------ line : str The current line in the", "files. When reading a file, the text content is saved", "metaclass=abc.ABCMeta): \"\"\" Base class for all line based text files.", "contain the required data or because the file is malformed.", "be supplied. 
Returns ------- file_object : File An instance from", "is required\") file.write(\"\\n\".join(self.lines) + \"\\n\") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines", "return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\" Indicates that the file is not", "calls are delegated to the instance method self.read = self._deprecated_read", "class method and replaces the data in `self` with the", "replaced by the instance method, so that subsequent # 'read()'", "file. Attributes ---------- lines : list List of string representing", "PROTECTED: Do not modify from outside. \"\"\" def __init__(self): super().__init__()", "wrap_string(text, width): \"\"\" A much simpler and hence much more", "\"\"\" Write the contents of this :class:`File` object into a", "file from disk (or a file-like object from other sources).", "file-like object or str The file to be written to.", "not line: break yield line # File object else: if", "file_object @staticmethod def read_iter(file): \"\"\" Create an iterator over each", "abc import io import warnings from .copyable import Copyable import", "break yield line def write(self, file): \"\"\" Write the contents", "range(0, len(text), width): lines.append(text[i : i+width]) return lines def is_binary(file):", "file-like object or str The file to be read. Alternatively", "as list of strings, one for each line. When writing", "used. \"\"\" def __init__(self): # Support for deprecated instance method", "can be supplied. \"\"\" pass class TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base", "TypeError(\"A file opened in 'text' mode is required\") file.write(\"\\n\".join(self.lines) +", "is part of the Biotite package and is distributed #", "file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines = lines return file_object", "either because the file does not contain the required data", "A much simpler and hence much more efficient version of", "part of the Biotite package and is distributed # under", "empty file, that can be filled with data using the", "supplied. Returns ------- file_object : File An instance from the", "File object else: if not is_text(file): raise TypeError(\"A file opened", "line : str The current line in the file. \"\"\"", "len(text), width): lines.append(text[i : i+width]) return lines def is_binary(file): if", "the file. \"\"\" # File name if isinstance(file, str): with", "class for all file classes. The constructor creates an empty", "[] for i in range(0, len(text), width): lines.append(text[i : i+width])", ": str The current line in the file. \"\"\" #", "the 'read()' class method is # replaced by the instance", "type(self) new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self,", "to be written to. Alternatively a file path can be", "Write the contents of this object into a file (or", "raise TypeError(\"A file opened in 'text' mode is required\") while", "into a file the :func:`write()` method is used. \"\"\" def", "`textwrap.wrap()`. This function simply wraps the given `text` after `width`", "for deprecated instance method 'read()': # When creating an instance,", "The file to be read. Alternatively a file path can", "a file the :func:`write()` method is used. \"\"\" def __init__(self):", "be read. Alternatively a file path can be supplied. Returns", "can be supplied. Returns ------- file_object : File An instance", "---------- file_name : file-like object or str The file to", "file. 
\"\"\" pass def _deprecated_read(self, file, *args, **kwargs): \"\"\" Support", "open(file, \"r\") as f: while True: line = f.readline() if", "This function simply wraps the given `text` after `width` characters,", "copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\" Indicates that the", ":func:`read()` class method and replaces the data in `self` with", "lines = f.read().splitlines() # File object else: if not is_text(file):", "'text' mode is required\") lines = file.read().splitlines() file_object = cls(*args,", "def __init__(self): # Support for deprecated instance method 'read()': #", "for deprecated instance method :func:`read()`. Internally this calls the :func:`read()`", "file is not suitable for the requested action, either because", "be filled with data using the class specific setter methods.", "with open(file, \"w\") as f: f.write(\"\\n\".join(self.lines) + \"\\n\") else: if", "@abc.abstractmethod def read(cls, file): \"\"\" Parse a file (or file-like", "file): \"\"\" Write the contents of this :class:`File` object into", "be written to. Alternatively a file path can be supplied.", "malformed. \"\"\" pass def wrap_string(text, width): \"\"\" A much simpler", "class File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base class for all file classes.", ": list List of string representing the lines in the", "of this object into a file (or file-like object). Parameters", "the :func:`write()` method is used. \"\"\" def __init__(self): # Support", "Support for deprecated instance method :func:`read()`. Internally this calls the", "line = f.readline() if not line: break yield line #", "copy class File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base class for all file", "specific setter methods. Conversely, the class method :func:`read()` reads a", "cls = type(self) new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod", "method instead\", DeprecationWarning ) cls = type(self) new_file = cls.read(file,", "requested action, either because the file does not contain the", "lines = file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines = lines", "an iterator over each line of the given text file.", "__author__ = \"<NAME>\" __all__ = [\"File\", \"TextFile\", \"InvalidFileError\"] import abc", "`width` characters, ignoring sentences, whitespaces, etc. \"\"\" lines = []", "def read_iter(file): \"\"\" Create an iterator over each line of", "can be supplied. \"\"\" if isinstance(file, str): with open(file, \"w\")", "\"\\n\") else: if not is_text(file): raise TypeError(\"A file opened in", "read_iter(file): \"\"\" Create an iterator over each line of the", "__name__ = \"biotite\" __author__ = \"<NAME>\" __all__ = [\"File\", \"TextFile\",", "if isinstance(file, str): with open(file, \"r\") as f: lines =", "import copy class File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base class for all", "`text` after `width` characters, ignoring sentences, whitespaces, etc. \"\"\" lines", "[] @classmethod def read(cls, file, *args, **kwargs): # File name", "a file (or file-like object). 
Parameters ---------- file_name : file-like", "'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.TextIOBase): return True else:", "contents of this object into a file (or file-like object).", "# This source code is part of the Biotite package", "method and replaces the data in `self` with the data", "f.read().splitlines() # File object else: if not is_text(file): raise TypeError(\"A", "write(self, file): \"\"\" Write the contents of this object into", "all file classes. The constructor creates an empty file, that", "file path can be supplied. \"\"\" if isinstance(file, str): with", "is deprecated, \" \"use class method instead\", DeprecationWarning ) cls", "'read()' is deprecated, \" \"use class method instead\", DeprecationWarning )", "a file. Parameters ---------- file_name : file-like object or str", "be read. Alternatively a file path can be supplied. Yields", "the class specific setter methods. Conversely, the class method :func:`read()`", "instance method, so that subsequent # 'read()' calls are delegated", "\"file\") and isinstance(file.file, io.BufferedIOBase): return True else: return False def", "---------- file : file-like object or str The file to", "line of the given text file. Parameters ---------- file :", "file): \"\"\" Parse a file (or file-like object). Parameters ----------", "import warnings from .copyable import Copyable import copy class File(Copyable,", "from disk (or a file-like object from other sources). In", "after `width` characters, ignoring sentences, whitespaces, etc. \"\"\" lines =", "# File name if isinstance(file, str): with open(file, \"r\") as", "from .copyable import Copyable import copy class File(Copyable, metaclass=abc.ABCMeta): \"\"\"", "a file, this list is written into the file. Attributes", "In order to write the instance content into a file", "reading a file, the text content is saved as list", "str The file to be written to. Alternatively a file", "the Biotite package and is distributed # under the 3-Clause", "if not line: break yield line def write(self, file): \"\"\"", "cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): \"\"\" Write", "a file path can be supplied. Yields ------ line :", "lines : list List of string representing the lines in", "str): with open(file, \"w\") as f: f.write(\"\\n\".join(self.lines) + \"\\n\") else:", "\"\"\" pass class TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base class for all", "method, so that subsequent # 'read()' calls are delegated to", "text files. When reading a file, the text content is", "mode is required\") file.write(\"\\n\".join(self.lines) + \"\\n\") def __copy_fill__(self, clone): super().__copy_fill__(clone)", "as f: lines = f.read().splitlines() # File object else: if", "= lines return file_object @staticmethod def read_iter(file): \"\"\" Create an", "file, *args, **kwargs): # File name if isinstance(file, str): with", "wraps the given `text` after `width` characters, ignoring sentences, whitespaces,", "# information. __name__ = \"biotite\" __author__ = \"<NAME>\" __all__ =", "the file is not suitable for the requested action, either", "**kwargs) file_object.lines = lines return file_object @staticmethod def read_iter(file): \"\"\"", "and hence much more efficient version of `textwrap.wrap()`. This function", "Alternatively a file path can be supplied. Returns ------- file_object", "\"\"\" A much simpler and hence much more efficient version", "path can be supplied. 
Returns ------- file_object : File An", "io.BufferedIOBase): return True # for file wrappers, e.g. 'TemporaryFile' elif", "required\") lines = file.read().splitlines() file_object = cls(*args, **kwargs) file_object.lines =", "file): \"\"\" Write the contents of this object into a", "supplied. Yields ------ line : str The current line in", "object or str The file to be read. Alternatively a", "for each line. When writing a file, this list is", "file opened in 'text' mode is required\") while True: line", "is_binary(file): if isinstance(file, io.BufferedIOBase): return True # for file wrappers,", "from the newly created :class:`File` object \"\"\" warnings.warn( \"Instance method", "each line of the given text file. Parameters ---------- file", "information. __name__ = \"biotite\" __author__ = \"<NAME>\" __all__ = [\"File\",", "to. Alternatively a file path can be supplied. \"\"\" if", "cls(*args, **kwargs) file_object.lines = lines return file_object @staticmethod def read_iter(file):", "is distributed # under the 3-Clause BSD License. Please see", "whitespaces, etc. \"\"\" lines = [] for i in range(0,", "f: while True: line = f.readline() if not line: break", "**kwargs): # File name if isinstance(file, str): with open(file, \"r\")", "= [] for i in range(0, len(text), width): lines.append(text[i :", "object into a file (or file-like object). Parameters ---------- file_name", "class InvalidFileError(Exception): \"\"\" Indicates that the file is not suitable", "much simpler and hence much more efficient version of `textwrap.wrap()`.", "read(cls, file): \"\"\" Parse a file (or file-like object). Parameters", "more efficient version of `textwrap.wrap()`. This function simply wraps the", "as f: f.write(\"\\n\".join(self.lines) + \"\\n\") else: if not is_text(file): raise", "a file path can be supplied. \"\"\" pass class TextFile(File,", "i in range(0, len(text), width): lines.append(text[i : i+width]) return lines", "Parse a file (or file-like object). Parameters ---------- file :", "a file path can be supplied. Returns ------- file_object :", "code is part of the Biotite package and is distributed", "line based text files. When reading a file, the text", "saved as list of strings, one for each line. When", "the newly created :class:`File` object \"\"\" warnings.warn( \"Instance method 'read()'", "When creating an instance, the 'read()' class method is #", "Do not modify from outside. \"\"\" def __init__(self): super().__init__() self.lines", "into the file. Attributes ---------- lines : list List of", "*args, **kwargs): # File name if isinstance(file, str): with open(file,", "be supplied. Yields ------ line : str The current line", "width): \"\"\" A much simpler and hence much more efficient", "False def is_text(file): if isinstance(file, io.TextIOBase): return True # for", "other sources). In order to write the instance content into", "@abc.abstractmethod def write(self, file): \"\"\" Write the contents of this", "= self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file): \"\"\" Parse a", "the given text file. Parameters ---------- file : file-like object", ") cls = type(self) new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__)", "the respective :class:`File` subclass representing the parsed file. \"\"\" pass", "each line. 
When writing a file, this list is written", "The constructor creates an empty file, that can be filled", "= type(self) new_file = cls.read(file, *args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def", "\"\"\" lines = [] for i in range(0, len(text), width):", "instance from the respective :class:`File` subclass representing the parsed file.", "content into a file the :func:`write()` method is used. \"\"\"", "the data in `self` with the data from the newly", "---------- lines : list List of string representing the lines", "not contain the required data or because the file is", "__copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines)) class", "\" \"use class method instead\", DeprecationWarning ) cls = type(self)", "the class method :func:`read()` reads a file from disk (or", "instance method self.read = self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file):", "path can be supplied. Yields ------ line : str The", "yield line # File object else: if not is_text(file): raise", "mode is required\") while True: line = file.readline() if not", "\"\"\" Write the contents of this object into a file", "is used. \"\"\" def __init__(self): # Support for deprecated instance", "file opened in 'text' mode is required\") file.write(\"\\n\".join(self.lines) + \"\\n\")", ":class:`File` object into a file. Parameters ---------- file_name : file-like", "file.readline() if not line: break yield line def write(self, file):", "is saved as list of strings, one for each line.", "method :func:`read()` reads a file from disk (or a file-like", "__init__(self): super().__init__() self.lines = [] @classmethod def read(cls, file, *args,", "The current line in the file. \"\"\" # File name", "True: line = file.readline() if not line: break yield line", "are delegated to the instance method self.read = self._deprecated_read @classmethod", "BSD License. Please see 'LICENSE.rst' for further # information. __name__", "outside. \"\"\" def __init__(self): super().__init__() self.lines = [] @classmethod def", "elif hasattr(file, \"file\") and isinstance(file.file, io.TextIOBase): return True else: return", "of string representing the lines in the text file. PROTECTED:", "opened in 'text' mode is required\") while True: line =", ":class:`File` subclass representing the parsed file. \"\"\" pass def _deprecated_read(self,", "TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base class for all line based text", "------- file_object : File An instance from the respective :class:`File`", "File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base class for all file classes. The", "representing the lines in the text file. PROTECTED: Do not", "for i in range(0, len(text), width): lines.append(text[i : i+width]) return", "\"\"\" def __init__(self): # Support for deprecated instance method 'read()':", "file-like object). 
Parameters ---------- file : file-like object or str", "import Copyable import copy class File(Copyable, metaclass=abc.ABCMeta): \"\"\" Base class", "file_object = cls(*args, **kwargs) file_object.lines = lines return file_object @staticmethod", "pass class TextFile(File, metaclass=abc.ABCMeta): \"\"\" Base class for all line", "When reading a file, the text content is saved as", "isinstance(file.file, io.BufferedIOBase): return True else: return False def is_text(file): if", "instance, the 'read()' class method is # replaced by the", "lines def is_binary(file): if isinstance(file, io.BufferedIOBase): return True # for", "this object into a file (or file-like object). Parameters ----------", "supplied. \"\"\" if isinstance(file, str): with open(file, \"w\") as f:", "list of strings, one for each line. When writing a", "@classmethod def read(cls, file, *args, **kwargs): # File name if", "to be read. Alternatively a file path can be supplied.", "File name if isinstance(file, str): with open(file, \"r\") as f:", "instead\", DeprecationWarning ) cls = type(self) new_file = cls.read(file, *args,", "text file. Parameters ---------- file : file-like object or str", "= file.readline() if not line: break yield line def write(self,", "file path can be supplied. Yields ------ line : str", "or str The file to be written to. Alternatively a", "\"\"\" Indicates that the file is not suitable for the", "the :func:`read()` class method and replaces the data in `self`", "def __init__(self): super().__init__() self.lines = [] @classmethod def read(cls, file,", "order to write the instance content into a file the", "self.read = self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file): \"\"\" Parse", "path can be supplied. \"\"\" if isinstance(file, str): with open(file,", "def is_text(file): if isinstance(file, io.TextIOBase): return True # for file", "the required data or because the file is malformed. \"\"\"", "instance content into a file the :func:`write()` method is used.", "of this :class:`File` object into a file. Parameters ---------- file_name", "sentences, whitespaces, etc. \"\"\" lines = [] for i in", ": File An instance from the respective :class:`File` subclass representing", "file. \"\"\" # File name if isinstance(file, str): with open(file,", "path can be supplied. \"\"\" pass class TextFile(File, metaclass=abc.ABCMeta): \"\"\"", "object from other sources). In order to write the instance", "`self` with the data from the newly created :class:`File` object", "of the Biotite package and is distributed # under the", "with open(file, \"r\") as f: lines = f.read().splitlines() # File", "Alternatively a file path can be supplied. Yields ------ line", "class method :func:`read()` reads a file from disk (or a", "def read(cls, file): \"\"\" Parse a file (or file-like object).", "io.BufferedIOBase): return True else: return False def is_text(file): if isinstance(file,", "distributed # under the 3-Clause BSD License. Please see 'LICENSE.rst'", "\"r\") as f: while True: line = f.readline() if not", "the text content is saved as list of strings, one", "for all file classes. The constructor creates an empty file,", "file wrappers, e.g. 
'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.TextIOBase):", "# Support for deprecated instance method 'read()': # When creating", "isinstance(file, str): with open(file, \"w\") as f: f.write(\"\\n\".join(self.lines) + \"\\n\")", "'text' mode is required\") file.write(\"\\n\".join(self.lines) + \"\\n\") def __copy_fill__(self, clone):", "be supplied. \"\"\" if isinstance(file, str): with open(file, \"w\") as", "= f.read().splitlines() # File object else: if not is_text(file): raise", "creates an empty file, that can be filled with data", "An instance from the respective :class:`File` subclass representing the parsed", "file opened in 'text' mode is required\") lines = file.read().splitlines()", "class method is # replaced by the instance method, so", "from the respective :class:`File` subclass representing the parsed file. \"\"\"", "the contents of this object into a file (or file-like", "version of `textwrap.wrap()`. This function simply wraps the given `text`", "much more efficient version of `textwrap.wrap()`. This function simply wraps", "an empty file, that can be filled with data using", "not suitable for the requested action, either because the file", "instance method 'read()': # When creating an instance, the 'read()'", "pass def wrap_string(text, width): \"\"\" A much simpler and hence", "to the instance method self.read = self._deprecated_read @classmethod @abc.abstractmethod def", "\"\"\" Base class for all line based text files. When", "name if isinstance(file, str): with open(file, \"r\") as f: lines", "super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception): \"\"\"", ": file-like object or str The file to be read.", "the instance method, so that subsequent # 'read()' calls are", "the given `text` after `width` characters, ignoring sentences, whitespaces, etc.", "str The current line in the file. \"\"\" # File", "# When creating an instance, the 'read()' class method is", "in 'text' mode is required\") while True: line = file.readline()", "= f.readline() if not line: break yield line # File", "str The file to be read. Alternatively a file path", "[\"File\", \"TextFile\", \"InvalidFileError\"] import abc import io import warnings from", "all line based text files. When reading a file, the", "e.g. 'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.BufferedIOBase): return True", "# File object else: if not is_text(file): raise TypeError(\"A file", "and isinstance(file.file, io.BufferedIOBase): return True else: return False def is_text(file):", "\"TextFile\", \"InvalidFileError\"] import abc import io import warnings from .copyable", "\"\"\" pass def _deprecated_read(self, file, *args, **kwargs): \"\"\" Support for", ":func:`write()` method is used. \"\"\" def __init__(self): # Support for", "file. PROTECTED: Do not modify from outside. \"\"\" def __init__(self):", "because the file does not contain the required data or", "# 'read()' calls are delegated to the instance method self.read", "file the :func:`write()` method is used. \"\"\" def __init__(self): #", "calls the :func:`read()` class method and replaces the data in", "DeprecationWarning ) cls = type(self) new_file = cls.read(file, *args, **kwargs)", "list List of string representing the lines in the text", "return True # for file wrappers, e.g. 
'TemporaryFile' elif hasattr(file,", "Write the contents of this :class:`File` object into a file.", "= \"<NAME>\" __all__ = [\"File\", \"TextFile\", \"InvalidFileError\"] import abc import", "constructor creates an empty file, that can be filled with", "line = file.readline() if not line: break yield line def", "method is used. \"\"\" def __init__(self): # Support for deprecated", "file, *args, **kwargs): \"\"\" Support for deprecated instance method :func:`read()`.", "a file path can be supplied. \"\"\" if isinstance(file, str):", "file wrappers, e.g. 'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file, io.BufferedIOBase):", "clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def __str__(self): return(\"\\n\".join(self.lines)) class InvalidFileError(Exception):", "for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, \"file\") and isinstance(file.file,", "line. When writing a file, this list is written into", "def wrap_string(text, width): \"\"\" A much simpler and hence much", "def write(self, file): \"\"\" Write the contents of this object", "not modify from outside. \"\"\" def __init__(self): super().__init__() self.lines =", "line # File object else: if not is_text(file): raise TypeError(\"A", "\"\"\" pass def wrap_string(text, width): \"\"\" A much simpler and", "by the instance method, so that subsequent # 'read()' calls", "return True else: return False def is_text(file): if isinstance(file, io.TextIOBase):", "text content is saved as list of strings, one for", "\"\"\" def __init__(self): super().__init__() self.lines = [] @classmethod def read(cls,", "= \"biotite\" __author__ = \"<NAME>\" __all__ = [\"File\", \"TextFile\", \"InvalidFileError\"]", "'read()' class method is # replaced by the instance method,", "line in the file. \"\"\" # File name if isinstance(file,", "*args, **kwargs) self.__dict__.update(new_file.__dict__) @abc.abstractmethod def write(self, file): \"\"\" Write the", "not line: break yield line def write(self, file): \"\"\" Write", "of strings, one for each line. When writing a file,", "def is_binary(file): if isinstance(file, io.BufferedIOBase): return True # for file", "data in `self` with the data from the newly created", "from outside. \"\"\" def __init__(self): super().__init__() self.lines = [] @classmethod", "io.TextIOBase): return True # for file wrappers, e.g. 'TemporaryFile' elif", "return file_object @staticmethod def read_iter(file): \"\"\" Create an iterator over", "True: line = f.readline() if not line: break yield line", "file to be written to. Alternatively a file path can", "not is_text(file): raise TypeError(\"A file opened in 'text' mode is", "this calls the :func:`read()` class method and replaces the data", "Biotite package and is distributed # under the 3-Clause BSD", "a file-like object from other sources). In order to write", "file path can be supplied. \"\"\" pass class TextFile(File, metaclass=abc.ABCMeta):", "Internally this calls the :func:`read()` class method and replaces the", "\"\"\" if isinstance(file, str): with open(file, \"w\") as f: f.write(\"\\n\".join(self.lines)", "package and is distributed # under the 3-Clause BSD License.", "method is # replaced by the instance method, so that", "into a file. Parameters ---------- file_name : file-like object or", "writing a file, this list is written into the file.", "is_text(file): if isinstance(file, io.TextIOBase): return True # for file wrappers,", "this :class:`File` object into a file. 
Parameters ---------- file_name :", "simply wraps the given `text` after `width` characters, ignoring sentences,", "# for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, \"file\") and", "to. Alternatively a file path can be supplied. \"\"\" pass", "see 'LICENSE.rst' for further # information. __name__ = \"biotite\" __author__", "'read()' calls are delegated to the instance method self.read =", "the contents of this :class:`File` object into a file. Parameters", "True # for file wrappers, e.g. 'TemporaryFile' elif hasattr(file, \"file\")", "self._deprecated_read @classmethod @abc.abstractmethod def read(cls, file): \"\"\" Parse a file", "TypeError(\"A file opened in 'text' mode is required\") lines =", "instance method :func:`read()`. Internally this calls the :func:`read()` class method", "\"\"\" Support for deprecated instance method :func:`read()`. Internally this calls", "lines in the text file. PROTECTED: Do not modify from", "that subsequent # 'read()' calls are delegated to the instance", "f.readline() if not line: break yield line # File object", "in the text file. PROTECTED: Do not modify from outside.", "contents of this :class:`File` object into a file. Parameters ----------", "**kwargs): \"\"\" Support for deprecated instance method :func:`read()`. Internally this", "\"\"\" # File name if isinstance(file, str): with open(file, \"r\")", "is required\") while True: line = file.readline() if not line:", "else: if not is_text(file): raise TypeError(\"A file opened in 'text'", "+ \"\\n\") def __copy_fill__(self, clone): super().__copy_fill__(clone) clone.lines = copy.copy(self.lines) def", "creating an instance, the 'read()' class method is # replaced", "# under the 3-Clause BSD License. Please see 'LICENSE.rst' for", "warnings.warn( \"Instance method 'read()' is deprecated, \" \"use class method", "in range(0, len(text), width): lines.append(text[i : i+width]) return lines def", "io import warnings from .copyable import Copyable import copy class", "*args, **kwargs): \"\"\" Support for deprecated instance method :func:`read()`. Internally" ]
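# ----------------------------------------------------------------------
# Usage sketch (not part of Biotite): ``SimpleLog`` is a hypothetical
# ``TextFile`` subclass, shown only to illustrate the ``read()`` /
# ``write()`` / ``lines`` contract documented above.

class SimpleLog(TextFile):
    """Hypothetical subclass exposing the error lines of a log file."""

    def get_errors(self):
        # ``self.lines`` is populated by the inherited ``TextFile.read()``
        return [line for line in self.lines if line.startswith("ERROR")]

# ``read()`` is a class method and accepts a path or any text-mode file
# object, e.g.:
#
#     log = SimpleLog.read(io.StringIO("INFO start\nERROR disk full"))
#     log.get_errors()       # -> ['ERROR disk full']
#     log.write("copy.log")  # writes the lines back, newline-terminated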
[ "if res.status_code == 200: logger.info(\"%r sent, FCM id: %r\", pnt,", "prepared push notification translation to be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation", ").slug # Testumgebung - prevent sending PNs to actual users", "\"\"\" status = True for pnt in self.prepared_pnts: res =", "= push_notification self.prepared_pnts = [] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language,", "\"data\": { \"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug, }, } headers =", "pnt.text}, \"data\": { \"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug, }, } headers", "is available :return: all prepared push notification translations are valid", "that should be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification =", "push notification translations are valid :rtype: bool \"\"\" if self.auth_key", "= True for pnt in self.prepared_pnts: res = self.send_pn(pnt) if", "self.push_notification = push_notification self.prepared_pnts = [] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification,", "notification translations are valid :rtype: bool \"\"\" if self.auth_key is", ":type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the :mod:`requests` library :rtype:", "= self.get_auth_key() def load_secondary_pnts(self): \"\"\" Load push notification translations in", "if ( secondary_pnt.title == \"\" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ):", "= self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self):", "API auth key :return: FCM API auth key :rtype: str", "def is_valid(self): \"\"\" Check if all data for sending push", "secondary_pnt in secondary_pnts: if ( secondary_pnt.title == \"\" and pnt_const.USE_MAIN_LANGUAGE", "False logger.warning( \"Received invalid response from FCM for %r, status:", "status: %r, body: %r\", pnt, res.status_code, res.text, ) return status", "\"\"\" Load relevant push notification translations and prepare content for", "region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung - prevent sending", "should be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification = push_notification", "headers = {\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload, headers=headers) # pylint:", "\"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification): \"\"\" Load relevant", "push notification translation to be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return:", ").exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts: if ( secondary_pnt.title == \"\"", "send_all(self): \"\"\" Send all prepared push notification translations :return: Success", "import settings from ...models import PushNotificationTranslation from ...models import Region", "# pylint: disable=too-many-arguments def send_all(self): \"\"\" Send all prepared push", "import requests from django.conf import settings from ...models import PushNotificationTranslation", "\"\"\" import logging 
import requests from django.conf import settings from", "notification translations :return: Success status :rtype: bool \"\"\" status =", "translations in other languages \"\"\" secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id)", "~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification = push_notification self.prepared_pnts = [] self.primary_pnt =", "return False return True @staticmethod def get_auth_key(): \"\"\" Get FCM", "PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts()", "get %r from configuration database\", fcm_auth_config_key ) return None def", "if self.auth_key is None: return False for pnt in self.prepared_pnts:", "from ...constants import push_notifications as pnt_const logger = logging.getLogger(__name__) #", "in other languages \"\"\" secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for", "self.send_pn(pnt) if res.status_code == 200: logger.info(\"%r sent, FCM id: %r\",", "~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the :mod:`requests` library :rtype: ~requests.Response \"\"\"", "language=push_notification.region.default_language, ) if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key =", ":rtype: bool \"\"\" status = True for pnt in self.prepared_pnts:", "self.auth_key is None: return False for pnt in self.prepared_pnts: if", "requests.post(self.fcm_url, json=payload, headers=headers) # pylint: disable=too-many-arguments def send_all(self): \"\"\" Send", "translations and prepare content for sending :param push_notification: the push", "prepared push notification translations :return: Success status :rtype: bool \"\"\"", "sending Push Notifications \"\"\" import logging import requests from django.conf", "return False for pnt in self.prepared_pnts: if not pnt.title: logger.debug(\"%r", "self.prepared_pnts: res = self.send_pn(pnt) if res.status_code == 200: logger.info(\"%r sent,", "~requests.Response \"\"\" if settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug #", "payload = { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title, \"body\": pnt.text},", "fcm_auth_key from database\") return auth_key.first().value logger.warning( \"Could not get %r", "prepare content for sending :param push_notification: the push notification that", "self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\" Check", "API auth key :rtype: str \"\"\" fcm_auth_config_key = \"fcm_auth_key\" auth_key", "prevent sending PNs to actual users in development else: region_slug", "for secondary_pnt in secondary_pnts: if ( secondary_pnt.title == \"\" and", "the prepared push notification translation to be sent :type pnt:", "sending :param push_notification: the push notification that should be sent", "> 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\" Check if all data", "notifications is available :return: all prepared push notification translations are", "0: 
self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\" Check if all data for", "def get_auth_key(): \"\"\" Get FCM API auth key :return: FCM", "configuration database\", fcm_auth_config_key ) return None def send_pn(self, pnt): \"\"\"", "push_notification: the push notification that should be sent :type push_notification:", "import push_notifications as pnt_const logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods", "= [] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title)", "}, } headers = {\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload, headers=headers)", "push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification = push_notification self.prepared_pnts = [] self.primary_pnt", "all data for sending push notifications is available :return: all", "= False logger.warning( \"Received invalid response from FCM for %r,", "self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt)", "pylint: disable=too-many-arguments def send_all(self): \"\"\" Send all prepared push notification", "res.json()[\"message_id\"]) else: status = False logger.warning( \"Received invalid response from", "def load_secondary_pnts(self): \"\"\" Load push notification translations in other languages", "= \"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification): \"\"\" Load relevant push notification", "key :rtype: str \"\"\" fcm_auth_config_key = \"fcm_auth_key\" auth_key = settings.FCM_KEY", ":return: all prepared push notification translations are valid :rtype: bool", "pnt.language.slug, \"city\": self.push_notification.region.slug, }, } headers = {\"Authorization\": f\"key={self.auth_key}\"} return", "disable=too-many-arguments def send_all(self): \"\"\" Send all prepared push notification translations", "import PushNotificationTranslation from ...models import Region from ...constants import push_notifications", "in secondary_pnts: if ( secondary_pnt.title == \"\" and pnt_const.USE_MAIN_LANGUAGE ==", "): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title)", "\"\"\" Load push notification translations in other languages \"\"\" secondary_pnts", "pylint: disable=too-few-public-methods class PushNotificationSender: \"\"\" Sends push notifications via FCM", "push notification translation :param pnt: the prepared push notification translation", "settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung - prevent", "as pnt_const logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods class PushNotificationSender:", "be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the :mod:`requests`", "no title\", pnt) return False return True @staticmethod def get_auth_key():", "secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts: if", "PushNotificationTranslation from ...models import Region from ...constants import push_notifications as", "False for pnt in 
self.prepared_pnts: if not pnt.title: logger.debug(\"%r has", "sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification = push_notification self.prepared_pnts =", "notifications via FCM HTTP API. Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url =", "len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key() def load_secondary_pnts(self):", "sending push notifications is available :return: all prepared push notification", "Success status :rtype: bool \"\"\" status = True for pnt", "\"\"\" if self.auth_key is None: return False for pnt in", "pnt): \"\"\" Send single push notification translation :param pnt: the", "if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\" Check if", "requests from django.conf import settings from ...models import PushNotificationTranslation from", "if settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung -", "self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\"", "translation :param pnt: the prepared push notification translation to be", "None: return False for pnt in self.prepared_pnts: if not pnt.title:", "HTTP API. Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\" def __init__(self,", "sent, FCM id: %r\", pnt, res.json()[\"message_id\"]) else: status = False", "via FCM HTTP API. Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\"", "API. 
Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification):", "return auth_key.first().value logger.warning( \"Could not get %r from configuration database\",", "translations are valid :rtype: bool \"\"\" if self.auth_key is None:", "FCM API auth key :return: FCM API auth key :rtype:", "( secondary_pnt.title == \"\" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title", "prepared push notification translations are valid :rtype: bool \"\"\" if", "of the :mod:`requests` library :rtype: ~requests.Response \"\"\" if settings.DEBUG: region_slug", "self.push_notification.region.slug payload = { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title, \"body\":", "f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload, headers=headers) # pylint: disable=too-many-arguments def send_all(self):", "Send all prepared push notification translations :return: Success status :rtype:", "auth_key = settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got fcm_auth_key from database\") return", "False return True @staticmethod def get_auth_key(): \"\"\" Get FCM API", "True for pnt in self.prepared_pnts: res = self.send_pn(pnt) if res.status_code", "json=payload, headers=headers) # pylint: disable=too-many-arguments def send_all(self): \"\"\" Send all", "# Testumgebung - prevent sending PNs to actual users in", "Notifications \"\"\" import logging import requests from django.conf import settings", "self.push_notification.region.slug, }, } headers = {\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload,", "push_notification self.prepared_pnts = [] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, )", "is_valid(self): \"\"\" Check if all data for sending push notifications", "...models import Region from ...constants import push_notifications as pnt_const logger", "push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key", "self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\" Check if all data for sending", "= PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt)", "} headers = {\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload, headers=headers) #", "[] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title) >", "pnt.title, \"body\": pnt.text}, \"data\": { \"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug, },", "200: logger.info(\"%r sent, FCM id: %r\", pnt, res.json()[\"message_id\"]) else: status", "region_slug = self.push_notification.region.slug payload = { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\":", "\"\"\" Send single push notification translation :param pnt: the prepared", "0: 
self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key() def load_secondary_pnts(self): \"\"\" Load", "if auth_key.exists(): logger.debug(\"Got fcm_auth_key from database\") return auth_key.first().value logger.warning( \"Could", "\"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title, \"body\": pnt.text}, \"data\": { \"lanCode\":", "push notification translations in other languages \"\"\" secondary_pnts = PushNotificationTranslation.objects.filter(", "and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text =", "push notifications via FCM HTTP API. Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url", "\"\"\" fcm_auth_config_key = \"fcm_auth_key\" auth_key = settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got", "else: status = False logger.warning( \"Received invalid response from FCM", "return requests.post(self.fcm_url, json=payload, headers=headers) # pylint: disable=too-many-arguments def send_all(self): \"\"\"", "__init__(self, push_notification): \"\"\" Load relevant push notification translations and prepare", "= logging.getLogger(__name__) # pylint: disable=too-few-public-methods class PushNotificationSender: \"\"\" Sends push", "all prepared push notification translations :return: Success status :rtype: bool", "from FCM for %r, status: %r, body: %r\", pnt, res.status_code,", "be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification = push_notification self.prepared_pnts", "for sending push notifications is available :return: all prepared push", "logger.debug(\"%r has no title\", pnt) return False return True @staticmethod", ":param push_notification: the push notification that should be sent :type", "self.auth_key = self.get_auth_key() def load_secondary_pnts(self): \"\"\" Load push notification translations", "\"Could not get %r from configuration database\", fcm_auth_config_key ) return", "Module for sending Push Notifications \"\"\" import logging import requests", "return None def send_pn(self, pnt): \"\"\" Send single push notification", "\"notification\": {\"title\": pnt.title, \"body\": pnt.text}, \"data\": { \"lanCode\": pnt.language.slug, \"city\":", "Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification): \"\"\"", "self.load_secondary_pnts() self.auth_key = self.get_auth_key() def load_secondary_pnts(self): \"\"\" Load push notification", "\"city\": self.push_notification.region.slug, }, } headers = {\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url,", ":rtype: str \"\"\" fcm_auth_config_key = \"fcm_auth_key\" auth_key = settings.FCM_KEY if", "load_secondary_pnts(self): \"\"\" Load push notification translations in other languages \"\"\"", "to be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the", ":return: Response of the :mod:`requests` library :rtype: ~requests.Response \"\"\" if", "are valid :rtype: bool \"\"\" if self.auth_key is None: return", "send_pn(self, pnt): \"\"\" Send single push notification translation :param pnt:", "Load push notification translations in other 
languages \"\"\" secondary_pnts =", "\"fcm_auth_key\" auth_key = settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got fcm_auth_key from database\")", "{\"title\": pnt.title, \"body\": pnt.text}, \"data\": { \"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug,", "logging import requests from django.conf import settings from ...models import", "single push notification translation :param pnt: the prepared push notification", "= { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title, \"body\": pnt.text}, \"data\":", "PushNotificationSender: \"\"\" Sends push notifications via FCM HTTP API. Definition:", "- prevent sending PNs to actual users in development else:", "from database\") return auth_key.first().value logger.warning( \"Could not get %r from", "push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts: if ( secondary_pnt.title ==", "development else: region_slug = self.push_notification.region.slug payload = { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\",", "else: region_slug = self.push_notification.region.slug payload = { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\":", "len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def is_valid(self): \"\"\" Check if all", "%r, status: %r, body: %r\", pnt, res.status_code, res.text, ) return", "id=settings.TEST_BLOG_ID ).slug # Testumgebung - prevent sending PNs to actual", "from configuration database\", fcm_auth_config_key ) return None def send_pn(self, pnt):", "languages \"\"\" secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in", ":return: Success status :rtype: bool \"\"\" status = True for", "# pylint: disable=too-few-public-methods class PushNotificationSender: \"\"\" Sends push notifications via", "%r from configuration database\", fcm_auth_config_key ) return None def send_pn(self,", "content for sending :param push_notification: the push notification that should", "f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title, \"body\": pnt.text}, \"data\": { \"lanCode\": pnt.language.slug,", "\"\"\" if settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung", "PNs to actual users in development else: region_slug = self.push_notification.region.slug", "self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if len(self.primary_pnt.title) > 0:", "https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification): \"\"\" Load", "settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got fcm_auth_key from database\") return auth_key.first().value logger.warning(", "FCM id: %r\", pnt, res.json()[\"message_id\"]) else: status = False logger.warning(", "logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods class PushNotificationSender: \"\"\" Sends", "self.prepared_pnts: if not pnt.title: logger.debug(\"%r has no title\", pnt) return", "push_notification): \"\"\" Load relevant push notification translations and prepare 
content", "if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key() def", "FCM API auth key :rtype: str \"\"\" fcm_auth_config_key = \"fcm_auth_key\"", "relevant push notification translations and prepare content for sending :param", "push notification that should be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\"", "and prepare content for sending :param push_notification: the push notification", "if not pnt.title: logger.debug(\"%r has no title\", pnt) return False", "FCM HTTP API. Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\" fcm_url = \"https://fcm.googleapis.com/fcm/send\" def", "notification translation to be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response", "valid :rtype: bool \"\"\" if self.auth_key is None: return False", "for %r, status: %r, body: %r\", pnt, res.status_code, res.text, )", "{ \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title, \"body\": pnt.text}, \"data\": {", "in self.prepared_pnts: res = self.send_pn(pnt) if res.status_code == 200: logger.info(\"%r", "sending PNs to actual users in development else: region_slug =", "fcm_auth_config_key = \"fcm_auth_key\" auth_key = settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got fcm_auth_key", "headers=headers) # pylint: disable=too-many-arguments def send_all(self): \"\"\" Send all prepared", "notification translation :param pnt: the prepared push notification translation to", "django.conf import settings from ...models import PushNotificationTranslation from ...models import", ") return None def send_pn(self, pnt): \"\"\" Send single push", "pnt: the prepared push notification translation to be sent :type", ":type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification = push_notification self.prepared_pnts = []", "> 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key() def load_secondary_pnts(self): \"\"\"", "is None: return False for pnt in self.prepared_pnts: if not", ":return: FCM API auth key :rtype: str \"\"\" fcm_auth_config_key =", "pnt in self.prepared_pnts: res = self.send_pn(pnt) if res.status_code == 200:", "res = self.send_pn(pnt) if res.status_code == 200: logger.info(\"%r sent, FCM", "secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0: self.prepared_pnts.append(secondary_pnt) def", "for sending :param push_notification: the push notification that should be", "\"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug, }, } headers = {\"Authorization\": f\"key={self.auth_key}\"}", "Send single push notification translation :param pnt: the prepared push", "...models import PushNotificationTranslation from ...models import Region from ...constants import", "Testumgebung - prevent sending PNs to actual users in development", "= self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) > 0:", "data for sending push notifications is available :return: all prepared", "push notification translations and prepare content for sending :param push_notification:", "notification translations 
in other languages \"\"\" secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification", "\"\"\" Send all prepared push notification translations :return: Success status", "Push Notifications \"\"\" import logging import requests from django.conf import", "actual users in development else: region_slug = self.push_notification.region.slug payload =", "not pnt.title: logger.debug(\"%r has no title\", pnt) return False return", "pnt.title: logger.debug(\"%r has no title\", pnt) return False return True", "True @staticmethod def get_auth_key(): \"\"\" Get FCM API auth key", "\"\"\" secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts:", "Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung - prevent sending PNs to", "import logging import requests from django.conf import settings from ...models", "to actual users in development else: region_slug = self.push_notification.region.slug payload", "self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if", "database\", fcm_auth_config_key ) return None def send_pn(self, pnt): \"\"\" Send", "other languages \"\"\" secondary_pnts = PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt", ":param pnt: the prepared push notification translation to be sent", ":rtype: bool \"\"\" if self.auth_key is None: return False for", "for pnt in self.prepared_pnts: res = self.send_pn(pnt) if res.status_code ==", "settings from ...models import PushNotificationTranslation from ...models import Region from", "self.get_auth_key() def load_secondary_pnts(self): \"\"\" Load push notification translations in other", "secondary_pnts: if ( secondary_pnt.title == \"\" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode", "logger.debug(\"Got fcm_auth_key from database\") return auth_key.first().value logger.warning( \"Could not get", "\"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification): \"\"\" Load relevant push notification translations", "\"\"\" self.push_notification = push_notification self.prepared_pnts = [] self.primary_pnt = PushNotificationTranslation.objects.get(", "= Region.objects.get( id=settings.TEST_BLOG_ID ).slug # Testumgebung - prevent sending PNs", "notification that should be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification \"\"\" self.push_notification", "pnt in self.prepared_pnts: if not pnt.title: logger.debug(\"%r has no title\",", "Sends push notifications via FCM HTTP API. 
Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json \"\"\"", "database\") return auth_key.first().value logger.warning( \"Could not get %r from configuration", "auth_key.exists(): logger.debug(\"Got fcm_auth_key from database\") return auth_key.first().value logger.warning( \"Could not", "\"\"\" Check if all data for sending push notifications is", "Region from ...constants import push_notifications as pnt_const logger = logging.getLogger(__name__)", "= settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got fcm_auth_key from database\") return auth_key.first().value", "\"body\": pnt.text}, \"data\": { \"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug, }, }", "all prepared push notification translations are valid :rtype: bool \"\"\"", "{ \"lanCode\": pnt.language.slug, \"city\": self.push_notification.region.slug, }, } headers = {\"Authorization\":", "pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the :mod:`requests` library :rtype: ~requests.Response", "Load relevant push notification translations and prepare content for sending", "FCM for %r, status: %r, body: %r\", pnt, res.status_code, res.text,", "not get %r from configuration database\", fcm_auth_config_key ) return None", "None def send_pn(self, pnt): \"\"\" Send single push notification translation", "str \"\"\" fcm_auth_config_key = \"fcm_auth_key\" auth_key = settings.FCM_KEY if auth_key.exists():", "PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts: if ( secondary_pnt.title", "status :rtype: bool \"\"\" status = True for pnt in", "bool \"\"\" if self.auth_key is None: return False for pnt", ":rtype: ~requests.Response \"\"\" if settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID ).slug", "title\", pnt) return False return True @staticmethod def get_auth_key(): \"\"\"", "logger.warning( \"Could not get %r from configuration database\", fcm_auth_config_key )", "self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key() def load_secondary_pnts(self): \"\"\" Load push", "res.status_code == 200: logger.info(\"%r sent, FCM id: %r\", pnt, res.json()[\"message_id\"])", "for pnt in self.prepared_pnts: if not pnt.title: logger.debug(\"%r has no", "sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of the :mod:`requests` library", "return True @staticmethod def get_auth_key(): \"\"\" Get FCM API auth", "the :mod:`requests` library :rtype: ~requests.Response \"\"\" if settings.DEBUG: region_slug =", "Get FCM API auth key :return: FCM API auth key", "for sending Push Notifications \"\"\" import logging import requests from", "auth key :rtype: str \"\"\" fcm_auth_config_key = \"fcm_auth_key\" auth_key =", "id: %r\", pnt, res.json()[\"message_id\"]) else: status = False logger.warning( \"Received", "\"\" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text", "Check if all data for sending push notifications is available", "\"\"\" Get FCM API auth key :return: FCM API auth", "response from FCM for %r, status: %r, body: %r\", pnt,", "logging.getLogger(__name__) # pylint: disable=too-few-public-methods class PushNotificationSender: \"\"\" Sends push notifications", "translations 
:return: Success status :rtype: bool \"\"\" status = True", "pnt, res.json()[\"message_id\"]) else: status = False logger.warning( \"Received invalid response", "has no title\", pnt) return False return True @staticmethod def", "get_auth_key(): \"\"\" Get FCM API auth key :return: FCM API", "pnt) return False return True @staticmethod def get_auth_key(): \"\"\" Get", "= PushNotificationTranslation.objects.filter( push_notification=self.push_notification ).exclude(id=self.primary_pnt.id) for secondary_pnt in secondary_pnts: if (", "= self.send_pn(pnt) if res.status_code == 200: logger.info(\"%r sent, FCM id:", "class PushNotificationSender: \"\"\" Sends push notifications via FCM HTTP API.", "pnt_const logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods class PushNotificationSender: \"\"\"", "= \"fcm_auth_key\" auth_key = settings.FCM_KEY if auth_key.exists(): logger.debug(\"Got fcm_auth_key from", "from ...models import Region from ...constants import push_notifications as pnt_const", "== 200: logger.info(\"%r sent, FCM id: %r\", pnt, res.json()[\"message_id\"]) else:", "push notification translations :return: Success status :rtype: bool \"\"\" status", "translation to be sent :type pnt: ~cms.models.push_notifications.push_notification_translation.PushNotificationTranslation :return: Response of", "import Region from ...constants import push_notifications as pnt_const logger =", "self.prepared_pnts = [] self.primary_pnt = PushNotificationTranslation.objects.get( push_notification=push_notification, language=push_notification.region.default_language, ) if", "available :return: all prepared push notification translations are valid :rtype:", "Response of the :mod:`requests` library :rtype: ~requests.Response \"\"\" if settings.DEBUG:", "invalid response from FCM for %r, status: %r, body: %r\",", "== \"\" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title", "users in development else: region_slug = self.push_notification.region.slug payload = {", "{\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload, headers=headers) # pylint: disable=too-many-arguments def", "bool \"\"\" status = True for pnt in self.prepared_pnts: res", "status = False logger.warning( \"Received invalid response from FCM for", "= self.push_notification.region.slug payload = { \"to\": f\"/topics/{region_slug}-{pnt.language.slug}-{self.push_notification.channel}\", \"notification\": {\"title\": pnt.title,", "in development else: region_slug = self.push_notification.region.slug payload = { \"to\":", "secondary_pnt.title == \"\" and pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title =", "\"\"\" Sends push notifications via FCM HTTP API. 
Definition: https://firebase.google.com/docs/cloud-messaging/http-server-ref#downstream-http-messages-json", ":mod:`requests` library :rtype: ~requests.Response \"\"\" if settings.DEBUG: region_slug = Region.objects.get(", "def __init__(self, push_notification): \"\"\" Load relevant push notification translations and", "\"\"\" Module for sending Push Notifications \"\"\" import logging import", "status = True for pnt in self.prepared_pnts: res = self.send_pn(pnt)", "library :rtype: ~requests.Response \"\"\" if settings.DEBUG: region_slug = Region.objects.get( id=settings.TEST_BLOG_ID", "= {\"Authorization\": f\"key={self.auth_key}\"} return requests.post(self.fcm_url, json=payload, headers=headers) # pylint: disable=too-many-arguments", "key :return: FCM API auth key :rtype: str \"\"\" fcm_auth_config_key", "fcm_auth_config_key ) return None def send_pn(self, pnt): \"\"\" Send single", "push notifications is available :return: all prepared push notification translations", "logger.warning( \"Received invalid response from FCM for %r, status: %r,", "...constants import push_notifications as pnt_const logger = logging.getLogger(__name__) # pylint:", "auth key :return: FCM API auth key :rtype: str \"\"\"", "from ...models import PushNotificationTranslation from ...models import Region from ...constants", "auth_key.first().value logger.warning( \"Could not get %r from configuration database\", fcm_auth_config_key", "logger.info(\"%r sent, FCM id: %r\", pnt, res.json()[\"message_id\"]) else: status =", "in self.prepared_pnts: if not pnt.title: logger.debug(\"%r has no title\", pnt)", "== self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt)", ") if len(self.primary_pnt.title) > 0: self.prepared_pnts.append(self.primary_pnt) self.load_secondary_pnts() self.auth_key = self.get_auth_key()", "the push notification that should be sent :type push_notification: ~cms.models.push_notifications.push_notification.PushNotification", "pnt_const.USE_MAIN_LANGUAGE == self.push_notification.mode ): secondary_pnt.title = self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text", "if all data for sending push notifications is available :return:", "disable=too-few-public-methods class PushNotificationSender: \"\"\" Sends push notifications via FCM HTTP", "%r\", pnt, res.json()[\"message_id\"]) else: status = False logger.warning( \"Received invalid", "from django.conf import settings from ...models import PushNotificationTranslation from ...models", "push_notifications as pnt_const logger = logging.getLogger(__name__) # pylint: disable=too-few-public-methods class", "fcm_url = \"https://fcm.googleapis.com/fcm/send\" def __init__(self, push_notification): \"\"\" Load relevant push", "@staticmethod def get_auth_key(): \"\"\" Get FCM API auth key :return:", "\"Received invalid response from FCM for %r, status: %r, body:", "notification translations and prepare content for sending :param push_notification: the", "def send_all(self): \"\"\" Send all prepared push notification translations :return:", "def send_pn(self, pnt): \"\"\" Send single push notification translation :param", "secondary_pnt.title = self.primary_pnt.title secondary_pnt.text = self.primary_pnt.text self.prepared_pnts.append(secondary_pnt) if len(secondary_pnt.title) >" ]
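# --- Usage sketch (illustrative, not part of the module) --------------------
# A minimal sketch of how a caller might drive the sender; `trigger_send`
# and its `push_notification` argument (a PushNotification instance) are
# assumptions for this example, not names from the codebase.
def trigger_send(push_notification):
    sender = PushNotificationSender(push_notification)
    # is_valid() fails on a missing FCM auth key or an untitled translation
    if not sender.is_valid():
        return False
    # send_all() posts one FCM request per prepared translation and returns
    # False if any request did not come back with HTTP 200
    return sender.send_all()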
#coding:utf-8
#
# id:           functional.index.create.03
# title:        CREATE ASC INDEX
# description:  CREATE ASC INDEX
#
#               Dependencies:
#               CREATE DATABASE
#               CREATE TABLE
#               SHOW INDEX
# tracker_id:
# min_versions: []
# versions:     1.0
# qmid:         functional.index.create.create_index_03

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 1.0
# resources: None

substitutions_1 = []

init_script_1 = """CREATE TABLE t( a INTEGER);
commit;"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

test_script_1 = """CREATE ASC INDEX test ON t(a);
SHOW INDEX test;"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """TEST INDEX ON T(A)"""


@pytest.mark.version('>=1.0')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_expected_stdout == act_1.clean_stdout
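# --- Running this test (illustrative note) -----------------------------------
# With the firebird-qa pytest plugin installed and a Firebird server
# configured, the test runs like any other pytest test, e.g.:
#
#     pytest -v functional/index/create/test_03.py
#
# (the file path is assumed here). The `act_1` fixture executes
# `test_script_1` through isql against the fresh database built by
# `db_factory`, and the final assertion compares the cleaned stdout
# against `expected_stdout_1`.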
# app/logic/httpcommon/Page.py
""" Page object file """

class Page():
    """ Page object, it contains information about the page we are referring to: index, items per page, etc. """
    page_index = 0
    items_per_page = 0

    def __init__(self, items_per_page, page_index):
        """ Creates the page """
        self.page_index = int(page_index)
        self.items_per_page = int(items_per_page)
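# --- Usage sketch (illustrative, not part of the class) ----------------------
# Page only stores the two integers; callers derive slice bounds themselves.
# The `start`/`end` names below are hypothetical.
page = Page(items_per_page=20, page_index=2)
start = page.page_index * page.items_per_page  # 40, assuming 0-based page indices
end = start + page.items_per_page              # 60
# items[start:end] would then yield the third page of a result list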
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function, absolute_import
import glob
import re
from os import path as osp

from .market1501 import Market1501

__factory = {
    'market1501': Market1501
}


def get_names():
    return list(__factory.keys())


def init_dataset(name, *args, **kwargs):
    if name not in __factory.keys():
        raise KeyError("Unknown datasets: {}".format(name))
    return __factory[name](*args, **kwargs)
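# --- Usage sketch (illustrative, not part of the module) ---------------------
# The factory maps a dataset name to its class and forwards any extra
# arguments to the constructor; `root` is assumed here as a typical
# Market1501 argument, and the import path is hypothetical.
#
#     from datasets import get_names, init_dataset
#     print(get_names())                                # ['market1501']
#     dataset = init_dataset('market1501', root='./data')
#     init_dataset('nonexistent')                       # raises KeyError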
#!/usr/bin/env python
# coding: utf-8
# Jupyter notebook cells (kernel: Python 3), exported in order

# In[64]:

# Import libraries
import os, csv


# In[65]:

# variables for the script
months = []  # list of months
pl = []  # list of monthly PL
pl_changes = []  # list of P&L Changes
n_months = 0  # count of months
pl_total = 0  # total of P&L
plc = 0  # variable to track PL changes
avg_pl_change = 0  # average of changes in PL
maxpl = 0  # maximum increase in profits
minpl = 0  # maximum decrease in losses
max_i = 0  # index for max pl
min_i = 0  # index for min pl

# read the resource file
bankcsv = os.path.join(".", "Resources", "budget_data.csv")  # set path

# read file
with open(bankcsv, 'r') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=",")
    header = next(csv_reader)

    # for loop to update the counters and lists
    for row in csv_reader:
        n_months += 1
        pl_total += int(row[1])
        pl.append(row[1])
        months.append(row[0])


# In[66]:

# loop to track the PL change values
pl_changes = []
plc = int(pl[0])
for i in range(1, len(pl)):
    pl_changes.append(int(pl[i]) - plc)
    plc = int(pl[i])
# print(pl_changes)


# In[67]:

# calculate the average PL Changes, max and min
avg_pl_change = sum(pl_changes) / len(pl_changes)
maxpl = max(pl_changes)
minpl = min(pl_changes)
# print(avg_pl_change, maxpl, minpl)
# print(pl_changes.index(maxpl))
# print(len(pl_changes))


# In[68]:

# find dates for max and min PL changes
max_i = pl_changes.index(maxpl) + 1  # adding +1 since the changes are calculated one row above
min_i = pl_changes.index(minpl) + 1

maxmonth = months[max_i]
minmonth = months[min_i]

# print output to the terminal

print("Financial Analysis")
print("-"*69)
print(f"Total Months: {n_months}")
print(f"Total: ${round(pl_total,2)}")
print(f"Average Change: ${round(avg_pl_change,2)}")
print(f"Greatest Increase in Profits: {maxmonth} (${maxpl})")
print(f"Greatest Decrease in Profits: {minmonth} (${minpl})")

# Output:
#   Financial Analysis
#   ---------------------------------------------------------------------
#   Total Months: 86
#   Total: $38382578
#   Average Change: $-2315.12
#   Greatest Increase in Profits: Feb-2012 ($1926159)
#   Greatest Decrease in Profits: Sep-2013 ($-2196167)


# In[69]:

# write summary to txt file
output = os.path.join(".", "Analysis", "summary.txt")

# use "\n" to create a new line
with open(output, 'w') as txt_file:
    txt_file.write("Financial Analysis\n")
    txt_file.write("-"*69 + "\n")
    txt_file.write(f"Total Months: {n_months}\n")
    txt_file.write(f"Total: ${round(pl_total,2)}\n")
    txt_file.write(f"Average Change: ${round(avg_pl_change,2)}\n")
    txt_file.write(f"Greatest Increase in Profits: {maxmonth} (${maxpl})\n")
    txt_file.write(f"Greatest Decrease in Profits: {minmonth} (${minpl})\n")
"${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\")\\n\", \"print(f\\\"Greatest Decrease in", "min(pl_changes)\\n\", \"#print(avg_pl_change, maxpl, minpl)\\n\", \"#print(pl_changes.index(maxpl))\\n\", \"#print(len(pl_changes))\" ] }, { \"cell_type\":", "}, { \"cell_type\": \"code\", \"execution_count\": 69, \"metadata\": {}, \"outputs\": [],", "\"avg_pl_change = 0 #average of changes in PL\\n\", \"maxpl =", "txt file\\n\", \"output = os.path.join(\\\".\\\",\\\"Analysis\\\", \\\"summary.txt\\\")\\n\", \"\\n\", \"# use \\\"\\\\n\\\"", "}, \"language_info\": { \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 },", "the resource file\\n\", \"bankcsv = os.path.join(\\\".\\\", \\\"Resources\\\", \\\"budget_data.csv\\\") #set path\\n\",", "\"language\": \"python\", \"name\": \"python3\" }, \"language_info\": { \"codemirror_mode\": { \"name\":", "\"# write summary to txt file\\n\", \"output = os.path.join(\\\".\\\",\\\"Analysis\\\", \\\"summary.txt\\\")\\n\",", "\"months = [] #list of months\\n\", \"pl =[] #list of", "\"kernelspec\": { \"display_name\": \"Python 3\", \"language\": \"python\", \"name\": \"python3\" },", "{ \"display_name\": \"Python 3\", \"language\": \"python\", \"name\": \"python3\" }, \"language_info\":", "pl_changes.index(maxpl) +1 #adding +1 since the changes are calculated one", "\"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\", \"version\": \"3.8.5\" } },", "= os.path.join(\\\".\\\", \\\"Resources\\\", \\\"budget_data.csv\\\") #set path\\n\", \"\\n\", \"\\n\", \"#read file\\n\",", "Feb-2012 ($1926159)\\n\", \"Greatest Decrease in Profits: Sep-2013 ($-2196167)\\n\" ] }", "of months\\n\", \"pl_total = 0 #total of P&L\\n\", \"plc =", "= 0 #maximum decrease in losses\\n\", \"max_i = 0 #index", "csv\" ] }, { \"cell_type\": \"code\", \"execution_count\": 65, \"metadata\": {},", "monthly PL\\n\", \"pl_changes = [] #list of P&L Changes\\n\", \"n_months", "Months: {n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Average Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\", \"", "output:\\n\", \" output.write(\\\"Financial Analysis\\\\n\\\")\\n\", \" output.write(\\\"-\\\"*69 + \\\"\\\\n\\\")\\n\", \" output.write(f\\\"Total", "= max(pl_changes)\\n\", \"minpl = min(pl_changes)\\n\", \"#print(avg_pl_change, maxpl, minpl)\\n\", \"#print(pl_changes.index(maxpl))\\n\", \"#print(len(pl_changes))\"", "} ], \"metadata\": { \"kernelspec\": { \"display_name\": \"Python 3\", \"language\":", "in Profits: {minmonth} (${minpl})\\\\n\\\")\" ] } ], \"metadata\": { \"kernelspec\":", "\"print(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\")\\n\", \"print(f\\\"Greatest Decrease in Profits:", "\"# loop to track the PL change values\\n\", \"pl_changes =", "\" i += 1\\n\", \"#print(pl_changes)\" ] }, { \"cell_type\": \"code\",", "3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\",", "#variable to track PL changes\\n\", \"avg_pl_change = 0 #average of", "[], \"source\": [ \"# Import libraries\\n\", \"import os, csv\" ]", "\"metadata\": {}, \"outputs\": [], \"source\": [ \"# Import libraries\\n\", \"import", "changes\\n\", \"max_i = pl_changes.index(maxpl) +1 #adding +1 since the changes", "range(1, len(pl)):\\n\", \" pl_changes.append(int(pl[i]) - plc)\\n\", \" plc = int(pl[i])\\n\",", "\"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 }, 
\"file_extension\": \".py\", \"mimetype\":", "= 0 #index for max pl\\n\", \"min_i = 0 #index", "\"cells\": [ { \"cell_type\": \"code\", \"execution_count\": 64, \"metadata\": {}, \"outputs\":", "PL\\n\", \"maxpl = 0 #maximum increase in profits\\n\", \"minpl =", "1\\n\", \" pl_total += int(row[1])\\n\", \" pl.append(row[1])\\n\", \" months.append(row[0])\" ]", "#set path\\n\", \"\\n\", \"\\n\", \"#read file\\n\", \"with open(bankcsv, 'r') as", "os, csv\" ] }, { \"cell_type\": \"code\", \"execution_count\": 65, \"metadata\":", "= sum(pl_changes) / len(pl_changes)\\n\", \"maxpl = max(pl_changes)\\n\", \"minpl = min(pl_changes)\\n\",", "\"pygments_lexer\": \"ipython3\", \"version\": \"3.8.5\" } }, \"nbformat\": 4, \"nbformat_minor\": 4", "\"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\", \"version\": \"3.8.5\" } }, \"nbformat\":", "\"ipython\", \"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\",", "for row in csv_reader:\\n\", \" n_months += 1\\n\", \" pl_total", "{n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Average Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Greatest", "loop to update the counters and lists\\n\", \" for row", "= pl_changes.index(minpl) +1\\n\", \"\\n\", \"maxmonth = months[max_i]\\n\", \"minmonth = months[min_i]\\n\",", "= [] \\n\", \"plc = int(pl[0])\\n\", \"for i in range(1,", "output.write(f\\\"Total Months: {n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Average Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\",", "\"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\",", "[ \"# loop to track the PL change values\\n\", \"pl_changes", "\"pl_changes = [] #list of P&L Changes\\n\", \"n_months = 0", "\"maxpl = max(pl_changes)\\n\", \"minpl = min(pl_changes)\\n\", \"#print(avg_pl_change, maxpl, minpl)\\n\", \"#print(pl_changes.index(maxpl))\\n\",", "{}, \"outputs\": [], \"source\": [ \"# Import libraries\\n\", \"import os,", "to track PL changes\\n\", \"avg_pl_change = 0 #average of changes", "0 #maximum increase in profits\\n\", \"minpl = 0 #maximum decrease", "\"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average Change: ${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in Profits: {maxmonth}", "\" \\n\", \" #for loop to update the counters and", "change values\\n\", \"pl_changes = [] \\n\", \"plc = int(pl[0])\\n\", \"for", "= months[max_i]\\n\", \"minmonth = months[min_i]\\n\", \"\\n\", \"#print output to the", "calculated one row above\\n\", \"min_i = pl_changes.index(minpl) +1\\n\", \"\\n\", \"maxmonth", "\"print(f\\\"Greatest Decrease in Profits: {minmonth} (${minpl})\\\")\\n\" ] }, { \"cell_type\":", "average PL Changes, max and min\\n\", \"avg_pl_change = sum(pl_changes) /", "= csv.reader(csv_file,delimiter=\\\",\\\")\\n\", \" header = next(csv_reader)\\n\", \" \\n\", \" #for", "{ \"cell_type\": \"code\", \"execution_count\": 68, \"metadata\": {}, \"outputs\": [ {", "decrease in losses\\n\", \"max_i = 0 #index for max pl\\n\",", "\"Python 3\", \"language\": \"python\", \"name\": \"python3\" }, \"language_info\": { \"codemirror_mode\":", "\"code\", \"execution_count\": 66, \"metadata\": {}, \"outputs\": [], \"source\": [ \"#", "[], \"source\": [ \"# write summary to txt file\\n\", \"output", "67, 
\"metadata\": {}, \"outputs\": [], \"source\": [ \"#calculate the average", "in Profits: {minmonth} (${minpl})\\\")\\n\" ] }, { \"cell_type\": \"code\", \"execution_count\":", "], \"source\": [ \"#find dates for max and min PL", "\\\"\\\\n\\\" to create a new line\\n\", \"with open(output, 'w') as", "min pl\\n\", \"\\n\", \"#read the resource file\\n\", \"bankcsv = os.path.join(\\\".\\\",", "P&L Changes\\n\", \"n_months = 0 #count of months\\n\", \"pl_total =", "\" output.write(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Decrease", "+1 #adding +1 since the changes are calculated one row", "] } ], \"source\": [ \"#find dates for max and", "\"\\n\", \"#read file\\n\", \"with open(bankcsv, 'r') as csv_file:\\n\", \" csv_reader", "increase in profits\\n\", \"minpl = 0 #maximum decrease in losses\\n\",", "output.write(f\\\"Average Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\\n\\\")\\n\",", "0 #index for max pl\\n\", \"min_i = 0 #index for", "counters and lists\\n\", \" for row in csv_reader:\\n\", \" n_months", "\"execution_count\": 67, \"metadata\": {}, \"outputs\": [], \"source\": [ \"#calculate the", "output.write(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Decrease in", "\\\"\\\\n\\\")\\n\", \" output.write(f\\\"Total Months: {n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Average", "${round(avg_pl_change,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\\n\\\")\\n\", \" output.write(f\\\"Greatest", "\"maxmonth = months[max_i]\\n\", \"minmonth = months[min_i]\\n\", \"\\n\", \"#print output to", "+1 since the changes are calculated one row above\\n\", \"min_i", "{}, \"outputs\": [], \"source\": [ \"# loop to track the", "\"#find dates for max and min PL changes\\n\", \"max_i =", "dates for max and min PL changes\\n\", \"max_i = pl_changes.index(maxpl)", "\"with open(output, 'w') as output:\\n\", \" output.write(\\\"Financial Analysis\\\\n\\\")\\n\", \" output.write(\\\"-\\\"*69", "\"ipython3\", \"version\": \"3.8.5\" } }, \"nbformat\": 4, \"nbformat_minor\": 4 }", "\"\\n\", \"#read the resource file\\n\", \"bankcsv = os.path.join(\\\".\\\", \\\"Resources\\\", \\\"budget_data.csv\\\")", "as output:\\n\", \" output.write(\\\"Financial Analysis\\\\n\\\")\\n\", \" output.write(\\\"-\\\"*69 + \\\"\\\\n\\\")\\n\", \"", "loop to track the PL change values\\n\", \"pl_changes = []", "= 0 #count of months\\n\", \"pl_total = 0 #total of", "\"max_i = 0 #index for max pl\\n\", \"min_i = 0", "\"print(f\\\"Average Change: ${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\")\\n\", \"print(f\\\"Greatest", "\"name\": \"ipython\", \"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\":", "\"output = os.path.join(\\\".\\\",\\\"Analysis\\\", \\\"summary.txt\\\")\\n\", \"\\n\", \"# use \\\"\\\\n\\\" to create", "pl_total += int(row[1])\\n\", \" pl.append(row[1])\\n\", \" months.append(row[0])\" ] }, {", "[] #list of months\\n\", \"pl =[] #list of monthly PL\\n\",", "{ \"cell_type\": \"code\", \"execution_count\": 67, \"metadata\": {}, \"outputs\": [], \"source\":", "[ \"Financial Analysis\\n\", \"---------------------------------------------------------------------\\n\", \"Total Months: 86\\n\", \"Total: $38382578\\n\", 
\"Average", "{ \"name\": \"ipython\", \"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\",", "the changes are calculated one row above\\n\", \"min_i = pl_changes.index(minpl)", "Profits: {minmonth} (${minpl})\\\")\\n\" ] }, { \"cell_type\": \"code\", \"execution_count\": 69,", "\".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\", \"version\":", "= months[min_i]\\n\", \"\\n\", \"#print output to the terminal\\n\", \"\\n\", \"print(\\\"Financial", "\"cell_type\": \"code\", \"execution_count\": 65, \"metadata\": {}, \"outputs\": [], \"source\": [", "\"#print(avg_pl_change, maxpl, minpl)\\n\", \"#print(pl_changes.index(maxpl))\\n\", \"#print(len(pl_changes))\" ] }, { \"cell_type\": \"code\",", "{ \"name\": \"stdout\", \"output_type\": \"stream\", \"text\": [ \"Financial Analysis\\n\", \"---------------------------------------------------------------------\\n\",", "#maximum decrease in losses\\n\", \"max_i = 0 #index for max", "\"max_i = pl_changes.index(maxpl) +1 #adding +1 since the changes are", "minpl)\\n\", \"#print(pl_changes.index(maxpl))\\n\", \"#print(len(pl_changes))\" ] }, { \"cell_type\": \"code\", \"execution_count\": 68,", "output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Average Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Increase in", "\\n\", \"plc = int(pl[0])\\n\", \"for i in range(1, len(pl)):\\n\", \"", "create a new line\\n\", \"with open(output, 'w') as output:\\n\", \"", "{ \"cell_type\": \"code\", \"execution_count\": 64, \"metadata\": {}, \"outputs\": [], \"source\":", "months.append(row[0])\" ] }, { \"cell_type\": \"code\", \"execution_count\": 66, \"metadata\": {},", "Changes, max and min\\n\", \"avg_pl_change = sum(pl_changes) / len(pl_changes)\\n\", \"maxpl", "\"cell_type\": \"code\", \"execution_count\": 64, \"metadata\": {}, \"outputs\": [], \"source\": [", "#list of P&L Changes\\n\", \"n_months = 0 #count of months\\n\",", "libraries\\n\", \"import os, csv\" ] }, { \"cell_type\": \"code\", \"execution_count\":", "/ len(pl_changes)\\n\", \"maxpl = max(pl_changes)\\n\", \"minpl = min(pl_changes)\\n\", \"#print(avg_pl_change, maxpl,", "int(row[1])\\n\", \" pl.append(row[1])\\n\", \" months.append(row[0])\" ] }, { \"cell_type\": \"code\",", "max and min PL changes\\n\", \"max_i = pl_changes.index(maxpl) +1 #adding", "=[] #list of monthly PL\\n\", \"pl_changes = [] #list of", "Profits: {maxmonth} (${maxpl})\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Decrease in Profits: {minmonth} (${minpl})\\\\n\\\")\"", "and lists\\n\", \" for row in csv_reader:\\n\", \" n_months +=", "\"stream\", \"text\": [ \"Financial Analysis\\n\", \"---------------------------------------------------------------------\\n\", \"Total Months: 86\\n\", \"Total:", "\"Total Months: 86\\n\", \"Total: $38382578\\n\", \"Average Change: $-2315.12\\n\", \"Greatest Increase", "i += 1\\n\", \"#print(pl_changes)\" ] }, { \"cell_type\": \"code\", \"execution_count\":", "max pl\\n\", \"min_i = 0 #index for min pl\\n\", \"\\n\",", "}, { \"cell_type\": \"code\", \"execution_count\": 68, \"metadata\": {}, \"outputs\": [", "86\\n\", \"Total: $38382578\\n\", \"Average Change: $-2315.12\\n\", \"Greatest Increase in Profits:", "pl.append(row[1])\\n\", \" months.append(row[0])\" ] }, { \"cell_type\": \"code\", \"execution_count\": 66,", "of monthly PL\\n\", \"pl_changes = [] #list of P&L Changes\\n\",", "{}, \"outputs\": [], \"source\": [ 
\"# write summary to txt", "to create a new line\\n\", \"with open(output, 'w') as output:\\n\",", "[ \"# Import libraries\\n\", \"import os, csv\" ] }, {", "{ \"cell_type\": \"code\", \"execution_count\": 65, \"metadata\": {}, \"outputs\": [], \"source\":", "\"#print(pl_changes.index(maxpl))\\n\", \"#print(len(pl_changes))\" ] }, { \"cell_type\": \"code\", \"execution_count\": 68, \"metadata\":", "\"\\n\", \"# use \\\"\\\\n\\\" to create a new line\\n\", \"with", "}, { \"cell_type\": \"code\", \"execution_count\": 67, \"metadata\": {}, \"outputs\": [],", "#index for min pl\\n\", \"\\n\", \"#read the resource file\\n\", \"bankcsv", "#list of months\\n\", \"pl =[] #list of monthly PL\\n\", \"pl_changes", "[] #list of P&L Changes\\n\", \"n_months = 0 #count of", "], \"metadata\": { \"kernelspec\": { \"display_name\": \"Python 3\", \"language\": \"python\",", "pl\\n\", \"min_i = 0 #index for min pl\\n\", \"\\n\", \"#read", "= 0 #average of changes in PL\\n\", \"maxpl = 0", "\"for i in range(1, len(pl)):\\n\", \" pl_changes.append(int(pl[i]) - plc)\\n\", \"", "i in range(1, len(pl)):\\n\", \" pl_changes.append(int(pl[i]) - plc)\\n\", \" plc", "Analysis\\\")\\n\", \"print(\\\"-\\\"*69)\\n\", \"print(f\\\"Total Months: {n_months}\\\")\\n\", \"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average Change: ${round(avg_pl_change,2)}\\\")\\n\",", "\"python3\" }, \"language_info\": { \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3", "\" pl.append(row[1])\\n\", \" months.append(row[0])\" ] }, { \"cell_type\": \"code\", \"execution_count\":", "0 #total of P&L\\n\", \"plc = 0 #variable to track", "69, \"metadata\": {}, \"outputs\": [], \"source\": [ \"# write summary", "\\n\", \" #for loop to update the counters and lists\\n\",", "[], \"source\": [ \"#variables for the script\\n\", \"months = []", "track the PL change values\\n\", \"pl_changes = [] \\n\", \"plc", "[ \"#variables for the script\\n\", \"months = [] #list of", "csv.reader(csv_file,delimiter=\\\",\\\")\\n\", \" header = next(csv_reader)\\n\", \" \\n\", \" #for loop", "for min pl\\n\", \"\\n\", \"#read the resource file\\n\", \"bankcsv =", "#average of changes in PL\\n\", \"maxpl = 0 #maximum increase", "\" pl_changes.append(int(pl[i]) - plc)\\n\", \" plc = int(pl[i])\\n\", \" i", "int(pl[0])\\n\", \"for i in range(1, len(pl)):\\n\", \" pl_changes.append(int(pl[i]) - plc)\\n\",", "{ \"kernelspec\": { \"display_name\": \"Python 3\", \"language\": \"python\", \"name\": \"python3\"", "\"cell_type\": \"code\", \"execution_count\": 69, \"metadata\": {}, \"outputs\": [], \"source\": [", "Change: ${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\")\\n\", \"print(f\\\"Greatest Decrease", "of P&L Changes\\n\", \"n_months = 0 #count of months\\n\", \"pl_total", "to txt file\\n\", \"output = os.path.join(\\\".\\\",\\\"Analysis\\\", \\\"summary.txt\\\")\\n\", \"\\n\", \"# use", "\"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\", \"version\": \"3.8.5\"", "] }, { \"cell_type\": \"code\", \"execution_count\": 65, \"metadata\": {}, \"outputs\":", "output.write(\\\"-\\\"*69 + \\\"\\\\n\\\")\\n\", \" output.write(f\\\"Total Months: {n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\",", "\" for row in csv_reader:\\n\", \" n_months += 1\\n\", \"", "\"pl_total = 0 #total of P&L\\n\", \"plc = 0 #variable", "${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average Change: 
${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\")\\n\",", "\"execution_count\": 66, \"metadata\": {}, \"outputs\": [], \"source\": [ \"# loop", "{n_months}\\\")\\n\", \"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average Change: ${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in Profits:", "terminal\\n\", \"\\n\", \"print(\\\"Financial Analysis\\\")\\n\", \"print(\\\"-\\\"*69)\\n\", \"print(f\\\"Total Months: {n_months}\\\")\\n\", \"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\",", "\"# Import libraries\\n\", \"import os, csv\" ] }, { \"cell_type\":", "\"\\n\", \"print(\\\"Financial Analysis\\\")\\n\", \"print(\\\"-\\\"*69)\\n\", \"print(f\\\"Total Months: {n_months}\\\")\\n\", \"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average", "= int(pl[0])\\n\", \"for i in range(1, len(pl)):\\n\", \" pl_changes.append(int(pl[i]) -", "file\\n\", \"bankcsv = os.path.join(\\\".\\\", \\\"Resources\\\", \\\"budget_data.csv\\\") #set path\\n\", \"\\n\", \"\\n\",", "Increase in Profits: Feb-2012 ($1926159)\\n\", \"Greatest Decrease in Profits: Sep-2013", "months[max_i]\\n\", \"minmonth = months[min_i]\\n\", \"\\n\", \"#print output to the terminal\\n\",", "the terminal\\n\", \"\\n\", \"print(\\\"Financial Analysis\\\")\\n\", \"print(\\\"-\\\"*69)\\n\", \"print(f\\\"Total Months: {n_months}\\\")\\n\", \"print(f\\\"Total:", "\"#read file\\n\", \"with open(bankcsv, 'r') as csv_file:\\n\", \" csv_reader =", "pl_changes.index(minpl) +1\\n\", \"\\n\", \"maxmonth = months[max_i]\\n\", \"minmonth = months[min_i]\\n\", \"\\n\",", "months\\n\", \"pl =[] #list of monthly PL\\n\", \"pl_changes = []", "\\\"budget_data.csv\\\") #set path\\n\", \"\\n\", \"\\n\", \"#read file\\n\", \"with open(bankcsv, 'r')", "\"min_i = 0 #index for min pl\\n\", \"\\n\", \"#read the", "summary to txt file\\n\", \"output = os.path.join(\\\".\\\",\\\"Analysis\\\", \\\"summary.txt\\\")\\n\", \"\\n\", \"#", "script\\n\", \"months = [] #list of months\\n\", \"pl =[] #list", "\"#variables for the script\\n\", \"months = [] #list of months\\n\",", "PL changes\\n\", \"avg_pl_change = 0 #average of changes in PL\\n\",", "new line\\n\", \"with open(output, 'w') as output:\\n\", \" output.write(\\\"Financial Analysis\\\\n\\\")\\n\",", "\" output.write(\\\"Financial Analysis\\\\n\\\")\\n\", \" output.write(\\\"-\\\"*69 + \\\"\\\\n\\\")\\n\", \" output.write(f\\\"Total Months:", "and min PL changes\\n\", \"max_i = pl_changes.index(maxpl) +1 #adding +1", "[ { \"cell_type\": \"code\", \"execution_count\": 64, \"metadata\": {}, \"outputs\": [],", "$-2315.12\\n\", \"Greatest Increase in Profits: Feb-2012 ($1926159)\\n\", \"Greatest Decrease in", "(${maxpl})\\\")\\n\", \"print(f\\\"Greatest Decrease in Profits: {minmonth} (${minpl})\\\")\\n\" ] }, {", "Analysis\\n\", \"---------------------------------------------------------------------\\n\", \"Total Months: 86\\n\", \"Total: $38382578\\n\", \"Average Change: $-2315.12\\n\",", "\"# use \\\"\\\\n\\\" to create a new line\\n\", \"with open(output,", "in losses\\n\", \"max_i = 0 #index for max pl\\n\", \"min_i", "\" months.append(row[0])\" ] }, { \"cell_type\": \"code\", \"execution_count\": 66, \"metadata\":", "\"metadata\": {}, \"outputs\": [], \"source\": [ \"# loop to track", "PL change values\\n\", \"pl_changes = [] \\n\", \"plc = int(pl[0])\\n\",", "for the script\\n\", \"months = [] #list of months\\n\", \"pl", "\"version\": 3 }, \"file_extension\": \".py\", \"mimetype\": 
\"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\":", "\" output.write(f\\\"Total Months: {n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total: ${round(pl_total,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Average Change:", "+= int(row[1])\\n\", \" pl.append(row[1])\\n\", \" months.append(row[0])\" ] }, { \"cell_type\":", "\" output.write(f\\\"Average Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Increase in Profits: {maxmonth}", "= 0 #index for min pl\\n\", \"\\n\", \"#read the resource", "\"bankcsv = os.path.join(\\\".\\\", \\\"Resources\\\", \\\"budget_data.csv\\\") #set path\\n\", \"\\n\", \"\\n\", \"#read", "line\\n\", \"with open(output, 'w') as output:\\n\", \" output.write(\\\"Financial Analysis\\\\n\\\")\\n\", \"", "\" #for loop to update the counters and lists\\n\", \"", "Analysis\\\\n\\\")\\n\", \" output.write(\\\"-\\\"*69 + \\\"\\\\n\\\")\\n\", \" output.write(f\\\"Total Months: {n_months}\\\\n\\\")\\n\", \"", "\" pl_total += int(row[1])\\n\", \" pl.append(row[1])\\n\", \" months.append(row[0])\" ] },", "\"cell_type\": \"code\", \"execution_count\": 66, \"metadata\": {}, \"outputs\": [], \"source\": [", "\"python\", \"pygments_lexer\": \"ipython3\", \"version\": \"3.8.5\" } }, \"nbformat\": 4, \"nbformat_minor\":", "in Profits: Feb-2012 ($1926159)\\n\", \"Greatest Decrease in Profits: Sep-2013 ($-2196167)\\n\"", "open(bankcsv, 'r') as csv_file:\\n\", \" csv_reader = csv.reader(csv_file,delimiter=\\\",\\\")\\n\", \" header", "\"\\n\", \"\\n\", \"#read file\\n\", \"with open(bankcsv, 'r') as csv_file:\\n\", \"", "\"pl =[] #list of monthly PL\\n\", \"pl_changes = [] #list", "\"with open(bankcsv, 'r') as csv_file:\\n\", \" csv_reader = csv.reader(csv_file,delimiter=\\\",\\\")\\n\", \"", "] }, { \"cell_type\": \"code\", \"execution_count\": 66, \"metadata\": {}, \"outputs\":", "[ \"#calculate the average PL Changes, max and min\\n\", \"avg_pl_change", "\"source\": [ \"#variables for the script\\n\", \"months = [] #list", "] }, { \"cell_type\": \"code\", \"execution_count\": 69, \"metadata\": {}, \"outputs\":", "os.path.join(\\\".\\\",\\\"Analysis\\\", \\\"summary.txt\\\")\\n\", \"\\n\", \"# use \\\"\\\\n\\\" to create a new", "] }, { \"cell_type\": \"code\", \"execution_count\": 67, \"metadata\": {}, \"outputs\":", "= min(pl_changes)\\n\", \"#print(avg_pl_change, maxpl, minpl)\\n\", \"#print(pl_changes.index(maxpl))\\n\", \"#print(len(pl_changes))\" ] }, {", "min\\n\", \"avg_pl_change = sum(pl_changes) / len(pl_changes)\\n\", \"maxpl = max(pl_changes)\\n\", \"minpl", "#maximum increase in profits\\n\", \"minpl = 0 #maximum decrease in", "0 #index for min pl\\n\", \"\\n\", \"#read the resource file\\n\",", "\"cell_type\": \"code\", \"execution_count\": 68, \"metadata\": {}, \"outputs\": [ { \"name\":", "\"source\": [ \"#find dates for max and min PL changes\\n\",", "changes\\n\", \"avg_pl_change = 0 #average of changes in PL\\n\", \"maxpl", "\"code\", \"execution_count\": 69, \"metadata\": {}, \"outputs\": [], \"source\": [ \"#", "{maxmonth} (${maxpl})\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Decrease in Profits: {minmonth} (${minpl})\\\\n\\\")\" ]", "}, \"file_extension\": \".py\", \"mimetype\": \"text/x-python\", \"name\": \"python\", \"nbconvert_exporter\": \"python\", \"pygments_lexer\":", "Import libraries\\n\", \"import os, csv\" ] }, { \"cell_type\": \"code\",", "\"outputs\": [], \"source\": [ \"# loop to track the PL", "Sep-2013 ($-2196167)\\n\" ] } ], \"source\": [ \"#find dates for", "(${minpl})\\\\n\\\")\" ] } ], 
\"metadata\": { \"kernelspec\": { \"display_name\": \"Python", "\\\"summary.txt\\\")\\n\", \"\\n\", \"# use \\\"\\\\n\\\" to create a new line\\n\",", "$38382578\\n\", \"Average Change: $-2315.12\\n\", \"Greatest Increase in Profits: Feb-2012 ($1926159)\\n\",", "Change: ${round(avg_pl_change,2)}\\\\n\\\")\\n\", \" output.write(f\\\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\\\n\\\")\\n\", \"", "{ \"cells\": [ { \"cell_type\": \"code\", \"execution_count\": 64, \"metadata\": {},", "\"#read the resource file\\n\", \"bankcsv = os.path.join(\\\".\\\", \\\"Resources\\\", \\\"budget_data.csv\\\") #set", "csv_file:\\n\", \" csv_reader = csv.reader(csv_file,delimiter=\\\",\\\")\\n\", \" header = next(csv_reader)\\n\", \"", "[ \"#find dates for max and min PL changes\\n\", \"max_i", "= pl_changes.index(maxpl) +1 #adding +1 since the changes are calculated", "\" output.write(\\\"-\\\"*69 + \\\"\\\\n\\\")\\n\", \" output.write(f\\\"Total Months: {n_months}\\\\n\\\")\\n\", \" output.write(f\\\"Total:", "of changes in PL\\n\", \"maxpl = 0 #maximum increase in", "max and min\\n\", \"avg_pl_change = sum(pl_changes) / len(pl_changes)\\n\", \"maxpl =", "\"#print(pl_changes)\" ] }, { \"cell_type\": \"code\", \"execution_count\": 67, \"metadata\": {},", "\"Greatest Increase in Profits: Feb-2012 ($1926159)\\n\", \"Greatest Decrease in Profits:", "($-2196167)\\n\" ] } ], \"source\": [ \"#find dates for max", "\"nbconvert_exporter\": \"python\", \"pygments_lexer\": \"ipython3\", \"version\": \"3.8.5\" } }, \"nbformat\": 4,", "in range(1, len(pl)):\\n\", \" pl_changes.append(int(pl[i]) - plc)\\n\", \" plc =", "#list of monthly PL\\n\", \"pl_changes = [] #list of P&L", "\"code\", \"execution_count\": 64, \"metadata\": {}, \"outputs\": [], \"source\": [ \"#", "\"print(\\\"Financial Analysis\\\")\\n\", \"print(\\\"-\\\"*69)\\n\", \"print(f\\\"Total Months: {n_months}\\\")\\n\", \"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average Change:", "Months: {n_months}\\\")\\n\", \"print(f\\\"Total: ${round(pl_total,2)}\\\")\\n\", \"print(f\\\"Average Change: ${round(avg_pl_change,2)}\\\")\\n\", \"print(f\\\"Greatest Increase in", "file\\n\", \"with open(bankcsv, 'r') as csv_file:\\n\", \" csv_reader = csv.reader(csv_file,delimiter=\\\",\\\")\\n\",", "{ \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 }, \"file_extension\": \".py\",", "are calculated one row above\\n\", \"min_i = pl_changes.index(minpl) +1\\n\", \"\\n\",", "csv_reader = csv.reader(csv_file,delimiter=\\\",\\\")\\n\", \" header = next(csv_reader)\\n\", \" \\n\", \"", "row in csv_reader:\\n\", \" n_months += 1\\n\", \" pl_total +=" ]
[ "@dll_import('OleAut32') def VariantInit( pvarg : POINTER(VARIANT) ) -> None: ...", "..wintypes import VARIANT, dll_import @dll_import('OleAut32') def VariantInit( pvarg : POINTER(VARIANT)", "from ctypes import POINTER, Structure from ..wintypes import VARIANT, dll_import", "VARIANT, dll_import @dll_import('OleAut32') def VariantInit( pvarg : POINTER(VARIANT) ) ->", "import VARIANT, dll_import @dll_import('OleAut32') def VariantInit( pvarg : POINTER(VARIANT) )", "ctypes import POINTER, Structure from ..wintypes import VARIANT, dll_import @dll_import('OleAut32')", "POINTER, Structure from ..wintypes import VARIANT, dll_import @dll_import('OleAut32') def VariantInit(", "Structure from ..wintypes import VARIANT, dll_import @dll_import('OleAut32') def VariantInit( pvarg", "<reponame>jkennedyvz/DeepFaceLive<gh_stars>0 from ctypes import POINTER, Structure from ..wintypes import VARIANT,", "dll_import @dll_import('OleAut32') def VariantInit( pvarg : POINTER(VARIANT) ) -> None:", "import POINTER, Structure from ..wintypes import VARIANT, dll_import @dll_import('OleAut32') def", "from ..wintypes import VARIANT, dll_import @dll_import('OleAut32') def VariantInit( pvarg :" ]
[ ".models import EventTypeDescriptor from .models import ExternalConfigurationDescriptor from .models import", "from .models import EventTypeDescriptor from .models import ExternalConfigurationDescriptor from .models", "will be lost if the code is regenerated. # --------------------------------------------------------------------------------------------", "InputFilterCondition from .models import InputValidation from .models import InputValue from", "Notification from .models import NotificationDetails from .models import NotificationResultsSummaryDetail from", "from .models import InputDescriptor from .models import InputFilter from .models", ".models import NotificationDetails from .models import NotificationResultsSummaryDetail from .models import", "from .models import NotificationSummary from .models import Publisher from .models", "'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification',", "'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks', 'ResourceContainer', 'SessionToken',", "the MIT License. See License.txt in the project root for", "import InputValidation from .models import InputValue from .models import InputValues", "import Publisher from .models import PublisherEvent from .models import PublishersQuery", "import InputFilter from .models import InputFilterCondition from .models import InputValidation", "-------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may", "'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition',", "from .models import SubscriptionsQuery from .models import VersionedResource __all__ =", "'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues',", ".models import Publisher from .models import PublisherEvent from .models import", "import InputValues from .models import InputValuesError from .models import InputValuesQuery", "project root for license information. # -------------------------------------------------------------------------------------------- # Generated file,", "import FormattedEventMessage from .models import IdentityRef from .models import InputDescriptor", "InputFilter from .models import InputFilterCondition from .models import InputValidation from", "DO NOT EDIT # Changes may cause incorrect behavior and", "root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO", "is regenerated. # -------------------------------------------------------------------------------------------- from .models import Consumer from .models", ".models import InputFilterCondition from .models import InputValidation from .models import", "# -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes", "ExternalConfigurationDescriptor from .models import FormattedEventMessage from .models import IdentityRef from", "See License.txt in the project root for license information. 
#", ".models import NotificationsQuery from .models import NotificationSummary from .models import", ".models import Notification from .models import NotificationDetails from .models import", "Changes may cause incorrect behavior and will be lost if", "from .models import PublishersQuery from .models import ReferenceLinks from .models", ".models import Subscription from .models import SubscriptionsQuery from .models import", "'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks',", "incorrect behavior and will be lost if the code is", "# Generated file, DO NOT EDIT # Changes may cause", "InputDescriptor from .models import InputFilter from .models import InputFilterCondition from", "import PublisherEvent from .models import PublishersQuery from .models import ReferenceLinks", "import InputValue from .models import InputValues from .models import InputValuesError", ".models import SessionToken from .models import Subscription from .models import", "and will be lost if the code is regenerated. #", "from .models import InputValidation from .models import InputValue from .models", "from .models import InputFilter from .models import InputFilterCondition from .models", ".models import PublisherEvent from .models import PublishersQuery from .models import", "import PublishersQuery from .models import ReferenceLinks from .models import ResourceContainer", "Consumer from .models import ConsumerAction from .models import Event from", "may cause incorrect behavior and will be lost if the", "'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery',", "NotificationDetails from .models import NotificationResultsSummaryDetail from .models import NotificationsQuery from", "'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks', 'ResourceContainer', 'SessionToken', 'Subscription', 'SubscriptionsQuery',", ".models import PublishersQuery from .models import ReferenceLinks from .models import", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "InputValues from .models import InputValuesError from .models import InputValuesQuery from", "NotificationsQuery from .models import NotificationSummary from .models import Publisher from", "'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks', 'ResourceContainer', 'SessionToken', 'Subscription', 'SubscriptionsQuery', 'VersionedResource',", "import ReferenceLinks from .models import ResourceContainer from .models import SessionToken", "from .models import VersionedResource __all__ = [ 'Consumer', 'ConsumerAction', 'Event',", "from .models import Consumer from .models import ConsumerAction from .models", "MIT License. See License.txt in the project root for license", "if the code is regenerated. 
# -------------------------------------------------------------------------------------------- from .models import", "from .models import InputValue from .models import InputValues from .models", "from .models import Subscription from .models import SubscriptionsQuery from .models", "import SubscriptionsQuery from .models import VersionedResource __all__ = [ 'Consumer',", "from .models import NotificationDetails from .models import NotificationResultsSummaryDetail from .models", ".models import InputValuesError from .models import InputValuesQuery from .models import", "FormattedEventMessage from .models import IdentityRef from .models import InputDescriptor from", "import IdentityRef from .models import InputDescriptor from .models import InputFilter", "reserved. # Licensed under the MIT License. See License.txt in", "from .models import NotificationsQuery from .models import NotificationSummary from .models", "SessionToken from .models import Subscription from .models import SubscriptionsQuery from", "from .models import IdentityRef from .models import InputDescriptor from .models", "import NotificationDetails from .models import NotificationResultsSummaryDetail from .models import NotificationsQuery", "from .models import NotificationResultsSummaryDetail from .models import NotificationsQuery from .models", ".models import IdentityRef from .models import InputDescriptor from .models import", "from .models import InputValuesQuery from .models import Notification from .models", "from .models import SessionToken from .models import Subscription from .models", "ConsumerAction from .models import Event from .models import EventTypeDescriptor from", ".models import ExternalConfigurationDescriptor from .models import FormattedEventMessage from .models import", "import InputValuesError from .models import InputValuesQuery from .models import Notification", "'InputValidation', 'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary',", "Corporation. All rights reserved. # Licensed under the MIT License.", "# Licensed under the MIT License. See License.txt in the", "import Subscription from .models import SubscriptionsQuery from .models import VersionedResource", "-------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "from .models import InputFilterCondition from .models import InputValidation from .models", "behavior and will be lost if the code is regenerated.", "'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery',", "# Changes may cause incorrect behavior and will be lost", "ResourceContainer from .models import SessionToken from .models import Subscription from", "from .models import Event from .models import EventTypeDescriptor from .models", ".models import InputValidation from .models import InputValue from .models import", "PublisherEvent from .models import PublishersQuery from .models import ReferenceLinks from", "License.txt in the project root for license information. 
# --------------------------------------------------------------------------------------------", ".models import InputFilter from .models import InputFilterCondition from .models import", ".models import NotificationSummary from .models import Publisher from .models import", "import InputFilterCondition from .models import InputValidation from .models import InputValue", "from .models import PublisherEvent from .models import PublishersQuery from .models", "InputValidation from .models import InputValue from .models import InputValues from", ".models import InputValuesQuery from .models import Notification from .models import", "EventTypeDescriptor from .models import ExternalConfigurationDescriptor from .models import FormattedEventMessage from", "file, DO NOT EDIT # Changes may cause incorrect behavior", "the code is regenerated. # -------------------------------------------------------------------------------------------- from .models import Consumer", "import NotificationResultsSummaryDetail from .models import NotificationsQuery from .models import NotificationSummary", "from .models import Publisher from .models import PublisherEvent from .models", "Subscription from .models import SubscriptionsQuery from .models import VersionedResource __all__", "'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks', 'ResourceContainer',", "License. See License.txt in the project root for license information.", "IdentityRef from .models import InputDescriptor from .models import InputFilter from", "# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. 
All rights reserved.", "__all__ = [ 'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef',", "from .models import FormattedEventMessage from .models import IdentityRef from .models", "InputValue from .models import InputValues from .models import InputValuesError from", "'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher',", "import Notification from .models import NotificationDetails from .models import NotificationResultsSummaryDetail", "# -------------------------------------------------------------------------------------------- from .models import Consumer from .models import ConsumerAction", ".models import Event from .models import EventTypeDescriptor from .models import", "from .models import InputValuesError from .models import InputValuesQuery from .models", "'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails',", ".models import FormattedEventMessage from .models import IdentityRef from .models import", "import Consumer from .models import ConsumerAction from .models import Event", "InputValuesQuery from .models import Notification from .models import NotificationDetails from", "import NotificationSummary from .models import Publisher from .models import PublisherEvent", "'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue',", "for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT", "from .models import ResourceContainer from .models import SessionToken from .models", "'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks', 'ResourceContainer', 'SessionToken', 'Subscription', 'SubscriptionsQuery', 'VersionedResource', ]", "PublishersQuery from .models import ReferenceLinks from .models import ResourceContainer from", ".models import SubscriptionsQuery from .models import VersionedResource __all__ = [", ".models import VersionedResource __all__ = [ 'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor',", "VersionedResource __all__ = [ 'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage',", "Generated file, DO NOT EDIT # Changes may cause incorrect", "SubscriptionsQuery from .models import VersionedResource __all__ = [ 'Consumer', 'ConsumerAction',", "'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation',", "under the MIT License. See License.txt in the project root", ".models import ConsumerAction from .models import Event from .models import", "import EventTypeDescriptor from .models import ExternalConfigurationDescriptor from .models import FormattedEventMessage", "'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent', 'PublishersQuery', 'ReferenceLinks', 'ResourceContainer', 'SessionToken', 'Subscription',", "NOT EDIT # Changes may cause incorrect behavior and will", "information. 
# -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT #", "in the project root for license information. # -------------------------------------------------------------------------------------------- #", "import InputDescriptor from .models import InputFilter from .models import InputFilterCondition", "cause incorrect behavior and will be lost if the code", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "All rights reserved. # Licensed under the MIT License. See", ".models import Consumer from .models import ConsumerAction from .models import", "from .models import Notification from .models import NotificationDetails from .models", "EDIT # Changes may cause incorrect behavior and will be", "'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery', 'NotificationSummary', 'Publisher', 'PublisherEvent',", "import ConsumerAction from .models import Event from .models import EventTypeDescriptor", "NotificationResultsSummaryDetail from .models import NotificationsQuery from .models import NotificationSummary from", "import InputValuesQuery from .models import Notification from .models import NotificationDetails", "from .models import ReferenceLinks from .models import ResourceContainer from .models", "license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT", "InputValuesError from .models import InputValuesQuery from .models import Notification from", "'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail', 'NotificationsQuery',", "Event from .models import EventTypeDescriptor from .models import ExternalConfigurationDescriptor from", ".models import InputValue from .models import InputValues from .models import", ".models import NotificationResultsSummaryDetail from .models import NotificationsQuery from .models import", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "Licensed under the MIT License. See License.txt in the project", "'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues', 'InputValuesError',", "ReferenceLinks from .models import ResourceContainer from .models import SessionToken from", "-------------------------------------------------------------------------------------------- from .models import Consumer from .models import ConsumerAction from", "regenerated. # -------------------------------------------------------------------------------------------- from .models import Consumer from .models import", ".models import ReferenceLinks from .models import ResourceContainer from .models import", "import NotificationsQuery from .models import NotificationSummary from .models import Publisher", "rights reserved. # Licensed under the MIT License. See License.txt", ".models import ResourceContainer from .models import SessionToken from .models import", "be lost if the code is regenerated. 
# -------------------------------------------------------------------------------------------- from", "import ResourceContainer from .models import SessionToken from .models import Subscription", ".models import InputDescriptor from .models import InputFilter from .models import", "NotificationSummary from .models import Publisher from .models import PublisherEvent from", "import ExternalConfigurationDescriptor from .models import FormattedEventMessage from .models import IdentityRef", "Publisher from .models import PublisherEvent from .models import PublishersQuery from", "lost if the code is regenerated. # -------------------------------------------------------------------------------------------- from .models", "[ 'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor', 'InputFilter',", "from .models import ExternalConfigurationDescriptor from .models import FormattedEventMessage from .models", "from .models import InputValues from .models import InputValuesError from .models", "from .models import ConsumerAction from .models import Event from .models", "import VersionedResource __all__ = [ 'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor',", "'InputFilter', 'InputFilterCondition', 'InputValidation', 'InputValue', 'InputValues', 'InputValuesError', 'InputValuesQuery', 'Notification', 'NotificationDetails', 'NotificationResultsSummaryDetail',", "code is regenerated. # -------------------------------------------------------------------------------------------- from .models import Consumer from", "= [ 'Consumer', 'ConsumerAction', 'Event', 'EventTypeDescriptor', 'ExternalConfigurationDescriptor', 'FormattedEventMessage', 'IdentityRef', 'InputDescriptor',", "import SessionToken from .models import Subscription from .models import SubscriptionsQuery", "the project root for license information. # -------------------------------------------------------------------------------------------- # Generated", ".models import InputValues from .models import InputValuesError from .models import", "import Event from .models import EventTypeDescriptor from .models import ExternalConfigurationDescriptor", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under" ]
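
# ------------------------------------------------------------------
# What the generated __init__ above provides, sketched generically:
# each "from .models import X" re-exports a model class at the
# package root, and __all__ pins what a star-import exposes. The
# package name "hooks_pkg" below is hypothetical; the file itself
# does not state how the package is installed or imported.
# ------------------------------------------------------------------
import sys
import types

models = types.ModuleType("hooks_pkg.models")   # hypothetical names
class Subscription:                             # stand-in model class
    pass
models.Subscription = Subscription

pkg = types.ModuleType("hooks_pkg")
pkg.models = models
pkg.Subscription = models.Subscription          # the re-export
pkg.__all__ = ["Subscription"]                  # star-import contract

sys.modules["hooks_pkg"] = pkg
sys.modules["hooks_pkg.models"] = models

# both paths now resolve to the same class object:
from hooks_pkg import Subscription as S1
from hooks_pkg.models import Subscription as S2
assert S1 is S2
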
# pizdyuk/pzd_logging.py
import datetime as date

from pzd_utils import datetime_to_str


class PizdyukLogger:
    """Console logger implemented as a lazily created singleton."""

    __logger = None  # the single shared instance

    def __init__(self):
        # Guard against direct construction once the singleton exists.
        if PizdyukLogger.__logger:
            raise RuntimeError("Logger instance already exists")

    @staticmethod
    def get_logger():
        # Create the shared instance on first access, then reuse it.
        if not PizdyukLogger.__logger:
            PizdyukLogger.__logger = PizdyukLogger()
        return PizdyukLogger.__logger

    def log_info(self, msg):
        self.__log(msg, "INFO")

    def log_warning(self, warning):
        self.__log(warning, "WARNING")

    def log_error(self, error):
        self.__log(error, "ERROR")

    def log_fatal(self, fatal):
        self.__log(fatal, "FATAL")

    def __log(self, msg, lvl):
        # Prefix every message with its level and the current timestamp.
        date_str = datetime_to_str(date.datetime.now())
        print("[{0}] [{1}] {2}".format(lvl, date_str, msg))
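
# A minimal usage sketch for the logger above, assuming pzd_utils is importable;
# the message strings are hypothetical. get_logger() is the intended entry
# point, since constructing PizdyukLogger() directly a second time raises
# RuntimeError.
def _logger_usage_example():
    logger = PizdyukLogger.get_logger()
    logger.log_info("service started")    # prints: [INFO] [<timestamp>] service started
    logger.log_warning("cache is stale")  # prints: [WARNING] [<timestamp>] cache is stale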
import numpy as np

from defdap.quat import Quat

hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
    hex_syms[0],
    hex_syms[5],
    hex_syms[4],
    hex_syms[2],
    hex_syms[10],
    hex_syms[11]
]

cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
    cubic_syms[0],
    cubic_syms[7],
    cubic_syms[9],
    cubic_syms[1],
    cubic_syms[22],
    cubic_syms[16],
    cubic_syms[12],
    cubic_syms[15],
    cubic_syms[4],
    cubic_syms[8],
    cubic_syms[21],
    cubic_syms[20]
]

# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
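
# A hedged sketch of how the quantities above are typically combined: composing
# burg_trans with each unique hexagonal symmetry of a parent HCP orientation
# gives one candidate BCC variant. The Euler angles are hypothetical, and the
# exact composition order depends on defdap's rotation convention.
def _burgers_variants_example():
    parent_ori = Quat.fromEulerAngles(0.1, 0.5, 0.3)  # hypothetical HCP orientation
    # one transformed orientation per symmetry that gives a distinct variant
    return [burg_trans * sym * parent_ori for sym in unq_hex_syms]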
# app/api/v1/views/auth_views.py
from flask import jsonify, Blueprint, request, json, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime

from ..utils.validators import Validation
from ..models.auth_models import Users

v1_auth_blueprint = Blueprint('auth', __name__, url_prefix='/api/v1')
USER = Users()
VALIDATOR = Validation()


@v1_auth_blueprint.route('/signup', methods=['POST'])
def signup():
    """View that controls creation of new users"""
    try:
        data = request.get_json()
    except:
        return jsonify({"status": 400, "message": "Invalid input"}), 400

    firstname = data.get('firstname')
    lastname = data.get('lastname')
    othername = data.get('othername')
    email = data.get('email')
    phone_number = data.get('phone_number')
    username = data.get('username')
    is_admin = data.get('is_admin')
    password = data.get('password')

    if not firstname or not firstname.split():
        return make_response(jsonify(
            {"status": 400, "message": "Firstname is required"})), 400
    if not lastname or not lastname.split():
        return make_response(jsonify(
            {"status": 400, "message": "Lastname is required"})), 400
    if not email or not email.split():
        return make_response(jsonify(
            {"status": 400, "message": "Email is required"})), 400
    if not phone_number:
        return make_response(jsonify(
            {"status": 400, "message": "Phone number is required"})), 400
    if not username or not username.split():
        return make_response(jsonify(
            {"status": 400, "message": "Username is required"})), 400
    if not password or not password.split():
        return make_response(jsonify(
            {"status": 400, "message": "Password is required"})), 400
    if not VALIDATOR.validate_phone_number(phone_number):
        return jsonify(
            {"status": 400, "message": "Please input valid phone number"}), 400
    # validate_password returns truthy when the password breaks the rules,
    # hence no `not` on this check
    if VALIDATOR.validate_password(password):
        return jsonify({"status": 400, "message": "Password not valid"}), 400
    if not VALIDATOR.validate_email(email):
        return jsonify({"status": 400, "message": "Invalid email"}), 400
    if VALIDATOR.username_exists(username):
        return jsonify({"status": 400, "message": "Username exists"}), 400
    if VALIDATOR.email_exists(email):
        return jsonify({"status": 400, "message": "Email exists"}), 400

    password = generate_password_hash(
        password, method='pbkdf2:sha256', salt_length=8)
    res = USER.signup(
        firstname, lastname, othername, email, phone_number,
        username, is_admin, password)
    return jsonify({
        "status": 201,
        "data": [{
            "firstname": firstname,
            "lastname": lastname,
            "othername": othername,
            "email": email,
            "phone_number": phone_number,
            "username": username,
            "is_admin": is_admin
        }]
    }), 201


@v1_auth_blueprint.route('/login', methods=['POST'])
def login():
    """ A view to control users login """
    try:
        data = request.get_json()
    except:
        return make_response(jsonify(
            {"status": 400, "message": "Wrong input"})), 400

    username = data.get('username')
    password = data.get('password')

    if not username:
        return make_response(jsonify(
            {"status": 400, "message": "Username is required"})), 400
    if not password:
        return make_response(jsonify(
            {"status": 400, "message": "Password is required"})), 400
    if not VALIDATOR.username_exists(username):
        return jsonify({"status": 404, "message": "User does not exist"}), 404

    auth_token = USER.generate_auth_token(username)
    return make_response(jsonify({
        "status": 200,
        "message": 'Logged in successfully',
        "token": auth_token
    })), 200
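
# A hedged sketch of exercising the /signup endpoint above with Flask's
# built-in test client. The field values are hypothetical; any Flask app that
# registers v1_auth_blueprint behaves the same way.
def _auth_blueprint_example():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(v1_auth_blueprint)
    client = app.test_client()
    # A 201 response echoes the submitted profile fields (minus the password).
    return client.post('/api/v1/signup', json={
        "firstname": "Ada", "lastname": "Lovelace", "othername": "",
        "email": "ada@example.com", "phone_number": "0712345678",
        "username": "ada", "is_admin": False, "password": "s3cretPass!",
    })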
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import

from pint.util import (UnitsContainer)
from pint.converters import (ScaleConverter, OffsetConverter)
from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
                              DimensionDefinition, AliasDefinition)
from pint.testsuite import BaseTestCase


class TestDefinition(BaseTestCase):

    def test_invalid(self):
        self.assertRaises(ValueError, Definition.from_string, 'x = [time] * meter')
        self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter')

    def test_prefix_definition(self):
        for definition in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):
            x = Definition.from_string(definition)
            self.assertIsInstance(x, PrefixDefinition)
            self.assertEqual(x.name, 'm')
            self.assertEqual(x.aliases, ())
            self.assertEqual(x.converter.to_reference(1000), 1)
            self.assertEqual(x.converter.from_reference(0.001), 1)
            self.assertEqual(str(x), 'm')

        x = Definition.from_string('kilo- = 1e-3 = k-')
        self.assertIsInstance(x, PrefixDefinition)
        self.assertEqual(x.name, 'kilo')
        self.assertEqual(x.aliases, ())
        self.assertEqual(x.symbol, 'k')
        self.assertEqual(x.converter.to_reference(1000), 1)
        self.assertEqual(x.converter.from_reference(.001), 1)

        x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')
        self.assertIsInstance(x, PrefixDefinition)
        self.assertEqual(x.name, 'kilo')
        self.assertEqual(x.aliases, ('anotherk', ))
        self.assertEqual(x.symbol, 'k')
        self.assertEqual(x.converter.to_reference(1000), 1)
        self.assertEqual(x.converter.from_reference(.001), 1)

    def test_baseunit_definition(self):
        x = Definition.from_string('meter = [length]')
        self.assertIsInstance(x, UnitDefinition)
        self.assertTrue(x.is_base)
        self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))

    def test_unit_definition(self):
        x = Definition.from_string('coulomb = ampere * second')
        self.assertIsInstance(x, UnitDefinition)
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, ScaleConverter)
        self.assertEqual(x.converter.scale, 1)
        self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1))

        x = Definition.from_string('faraday = 96485.3399 * coulomb')
        self.assertIsInstance(x, UnitDefinition)
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, ScaleConverter)
        self.assertEqual(x.converter.scale, 96485.3399)
        self.assertEqual(x.reference, UnitsContainer(coulomb=1))

        x = Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')
        self.assertIsInstance(x, UnitDefinition)
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, OffsetConverter)
        self.assertEqual(x.converter.scale, 9/5)
        self.assertEqual(x.converter.offset, 255.372222)
        self.assertEqual(x.reference, UnitsContainer(kelvin=1))

        x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _')
        self.assertIsInstance(x, UnitDefinition)
        self.assertEqual(x.name, 'turn')
        self.assertEqual(x.aliases, ('revolution', 'cycle'))
        self.assertEqual(x.symbol, 'turn')
        self.assertFalse(x.is_base)
        self.assertIsInstance(x.converter, ScaleConverter)
        self.assertEqual(x.converter.scale, 6.28)
        self.assertEqual(x.reference, UnitsContainer(radian=1))

    def test_dimension_definition(self):
        x = DimensionDefinition('[time]', '', (), converter='')
        self.assertTrue(x.is_base)
        self.assertEqual(x.name, '[time]')

        x = Definition.from_string('[speed] = [length]/[time]')
        self.assertIsInstance(x, DimensionDefinition)
        self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))

    def test_alias_definition(self):
        x = Definition.from_string("@alias meter = metro = metr")
        self.assertIsInstance(x, AliasDefinition)
        self.assertEqual(x.name, "meter")
        self.assertEqual(x.aliases, ("metro", "metr"))
"self.assertIsInstance(x, UnitDefinition) self.assertEqual(x.name, 'turn') self.assertEqual(x.aliases, ('revolution', 'cycle')) self.assertEqual(x.symbol, 'turn') self.assertFalse(x.is_base)", "= [time] * meter') self.assertRaises(ValueError, Definition.from_string, '[x] = [time] *", "self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter') def test_prefix_definition(self): for", "self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'kilo') self.assertEqual(x.aliases, ('anotherk', )) self.assertEqual(x.symbol, 'k') self.assertEqual(x.converter.to_reference(1000),", "self.assertIsInstance(x.converter, OffsetConverter) self.assertEqual(x.converter.scale, 9/5) self.assertEqual(x.converter.offset, 255.372222) self.assertEqual(x.reference, UnitsContainer(kelvin=1)) x =", "self.assertEqual(x.converter.from_reference(.001), 1) def test_baseunit_definition(self): x = Definition.from_string('meter = [length]') self.assertIsInstance(x,", "x = Definition.from_string(definition) self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'm') self.assertEqual(x.aliases, ()) self.assertEqual(x.converter.to_reference(1000),", "= _ = revolution = = cycle = _') self.assertIsInstance(x,", "self.assertEqual(x.aliases, ('revolution', 'cycle')) self.assertEqual(x.symbol, 'turn') self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 6.28)", "= [time] * meter') def test_prefix_definition(self): for definition in ('m-", "self.assertEqual(x.reference, UnitsContainer(radian=1)) def test_dimension_definition(self): x = DimensionDefinition('[time]', '', (), converter='')", "cycle = _') self.assertIsInstance(x, UnitDefinition) self.assertEqual(x.name, 'turn') self.assertEqual(x.aliases, ('revolution', 'cycle'))", "from pint.util import (UnitsContainer) from pint.converters import (ScaleConverter, OffsetConverter) from", "'kilo') self.assertEqual(x.aliases, ('anotherk', )) self.assertEqual(x.symbol, 'k') self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(.001), 1)", "= Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')", "* kelvin; offset: 255.372222') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, OffsetConverter) self.assertEqual(x.converter.scale,", "self.assertEqual(x.converter.scale, 9/5) self.assertEqual(x.converter.offset, 255.372222) self.assertEqual(x.reference, UnitsContainer(kelvin=1)) x = Definition.from_string('turn =", "PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition) from pint.testsuite import BaseTestCase class TestDefinition(BaseTestCase):", "self.assertEqual(x.name, '[time]') x = Definition.from_string('[speed] = [length]/[time]') self.assertIsInstance(x, DimensionDefinition) self.assertEqual(x.reference,", "DimensionDefinition, AliasDefinition) from pint.testsuite import BaseTestCase class TestDefinition(BaseTestCase): def test_invalid(self):", "1) self.assertEqual(x.converter.from_reference(.001), 1) def test_baseunit_definition(self): x = Definition.from_string('meter = [length]')", "Definition.from_string(definition) self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'm') self.assertEqual(x.aliases, ()) self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(0.001),", "self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 6.28) 
self.assertEqual(x.reference, UnitsContainer(radian=1)) def test_dimension_definition(self): x", "print_function, absolute_import from pint.util import (UnitsContainer) from pint.converters import (ScaleConverter,", "self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(0.001), 1) self.assertEqual(str(x), 'm') x = Definition.from_string('kilo- =", "x = Definition.from_string('turn = 6.28 * radian = _ =", "_') self.assertIsInstance(x, UnitDefinition) self.assertEqual(x.name, 'turn') self.assertEqual(x.aliases, ('revolution', 'cycle')) self.assertEqual(x.symbol, 'turn')", "meter') self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter') def test_prefix_definition(self):", "DimensionDefinition) self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1})) def test_alias_definition(self): x =", "('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'): x", "def test_alias_definition(self): x = Definition.from_string(\"@alias meter = metro = metr\")", "1) x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')", "* coulomb') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 96485.3399) self.assertEqual(x.reference,", "coding: utf-8 -*- from __future__ import division, unicode_literals, print_function, absolute_import", "self.assertTrue(x.is_base) self.assertEqual(x.reference, UnitsContainer({'[length]': 1})) def test_unit_definition(self): x = Definition.from_string('coulomb =", "radian = _ = revolution = = cycle = _')", "class TestDefinition(BaseTestCase): def test_invalid(self): self.assertRaises(ValueError, Definition.from_string, 'x = [time] *", "'k') self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(.001), 1) def test_baseunit_definition(self): x = Definition.from_string('meter", "UnitsContainer(radian=1)) def test_dimension_definition(self): x = DimensionDefinition('[time]', '', (), converter='') self.assertTrue(x.is_base)", "'', (), converter='') self.assertTrue(x.is_base) self.assertEqual(x.name, '[time]') x = Definition.from_string('[speed] =", "= 1e-3', 'm- = 10**-3', 'm- = 0.001'): x =", "self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'm') self.assertEqual(x.aliases, ()) self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(0.001), 1)", "x = Definition.from_string('faraday = 96485.3399 * coulomb') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base)", "UnitsContainer({'[length]': 1})) def test_unit_definition(self): x = Definition.from_string('coulomb = ampere *", "Definition.from_string, 'x = [time] * meter') self.assertRaises(ValueError, Definition.from_string, '[x] =", "96485.3399) self.assertEqual(x.reference, UnitsContainer(coulomb=1)) x = Definition.from_string('degF = 9 / 5", "x = Definition.from_string('degF = 9 / 5 * kelvin; offset:", "UnitDefinition) self.assertEqual(x.name, 'turn') self.assertEqual(x.aliases, ('revolution', 'cycle')) self.assertEqual(x.symbol, 'turn') self.assertFalse(x.is_base) self.assertIsInstance(x.converter,", "self.assertEqual(x.converter.scale, 6.28) self.assertEqual(x.reference, UnitsContainer(radian=1)) def test_dimension_definition(self): x = DimensionDefinition('[time]', '',", "metro = metr\") self.assertIsInstance(x, AliasDefinition) self.assertEqual(x.name, \"meter\") self.assertEqual(x.aliases, (\"metro\", \"metr\"))", "PrefixDefinition) 
self.assertEqual(x.name, 'm') self.assertEqual(x.aliases, ()) self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(0.001), 1) self.assertEqual(str(x),", "1e-3 = k- = anotherk-') self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'kilo') self.assertEqual(x.aliases,", "* second') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 1) self.assertEqual(x.reference,", "5 * kelvin; offset: 255.372222') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, OffsetConverter)", "ScaleConverter) self.assertEqual(x.converter.scale, 6.28) self.assertEqual(x.reference, UnitsContainer(radian=1)) def test_dimension_definition(self): x = DimensionDefinition('[time]',", "test_alias_definition(self): x = Definition.from_string(\"@alias meter = metro = metr\") self.assertIsInstance(x,", "0.001'): x = Definition.from_string(definition) self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'm') self.assertEqual(x.aliases, ())", "_ = revolution = = cycle = _') self.assertIsInstance(x, UnitDefinition)", "ScaleConverter) self.assertEqual(x.converter.scale, 96485.3399) self.assertEqual(x.reference, UnitsContainer(coulomb=1)) x = Definition.from_string('degF = 9", "self.assertEqual(x.reference, UnitsContainer(coulomb=1)) x = Definition.from_string('degF = 9 / 5 *", "pint.definitions import (Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition) from pint.testsuite import", "= Definition.from_string('kilo- = 1e-3 = k- = anotherk-') self.assertIsInstance(x, PrefixDefinition)", "def test_baseunit_definition(self): x = Definition.from_string('meter = [length]') self.assertIsInstance(x, UnitDefinition) self.assertTrue(x.is_base)", "definition in ('m- = 1e-3', 'm- = 10**-3', 'm- =", "= Definition.from_string(\"@alias meter = metro = metr\") self.assertIsInstance(x, AliasDefinition) self.assertEqual(x.name,", "meter = metro = metr\") self.assertIsInstance(x, AliasDefinition) self.assertEqual(x.name, \"meter\") self.assertEqual(x.aliases,", "/ 5 * kelvin; offset: 255.372222') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter,", "UnitsContainer(kelvin=1)) x = Definition.from_string('turn = 6.28 * radian = _", "'turn') self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 6.28) self.assertEqual(x.reference, UnitsContainer(radian=1)) def test_dimension_definition(self):", "Definition.from_string, '[x] = [time] * meter') def test_prefix_definition(self): for definition", "Definition.from_string('kilo- = 1e-3 = k-') self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'kilo') self.assertEqual(x.aliases,", "second') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 1) self.assertEqual(x.reference, UnitsContainer(ampere=1,", "UnitDefinition, DimensionDefinition, AliasDefinition) from pint.testsuite import BaseTestCase class TestDefinition(BaseTestCase): def", "()) self.assertEqual(x.symbol, 'k') self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(.001), 1) x = Definition.from_string('kilo-", "self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(.001), 1) x = 
Definition.from_string('kilo- = 1e-3 =", "'m') x = Definition.from_string('kilo- = 1e-3 = k-') self.assertIsInstance(x, PrefixDefinition)", "'m- = 10**-3', 'm- = 0.001'): x = Definition.from_string(definition) self.assertIsInstance(x,", "= _') self.assertIsInstance(x, UnitDefinition) self.assertEqual(x.name, 'turn') self.assertEqual(x.aliases, ('revolution', 'cycle')) self.assertEqual(x.symbol,", "x = Definition.from_string('[speed] = [length]/[time]') self.assertIsInstance(x, DimensionDefinition) self.assertEqual(x.reference, UnitsContainer({'[length]': 1,", "'kilo') self.assertEqual(x.aliases, ()) self.assertEqual(x.symbol, 'k') self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(.001), 1) x", "self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 6.28) self.assertEqual(x.reference, UnitsContainer(radian=1)) def test_dimension_definition(self): x =", "OffsetConverter) from pint.definitions import (Definition, PrefixDefinition, UnitDefinition, DimensionDefinition, AliasDefinition) from", "self.assertEqual(x.converter.from_reference(0.001), 1) self.assertEqual(str(x), 'm') x = Definition.from_string('kilo- = 1e-3 =", "[time] * meter') def test_prefix_definition(self): for definition in ('m- =", "__future__ import division, unicode_literals, print_function, absolute_import from pint.util import (UnitsContainer)", "self.assertEqual(x.name, 'turn') self.assertEqual(x.aliases, ('revolution', 'cycle')) self.assertEqual(x.symbol, 'turn') self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter)", "in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):", "'x = [time] * meter') self.assertRaises(ValueError, Definition.from_string, '[x] = [time]", "1e-3 = k-') self.assertIsInstance(x, PrefixDefinition) self.assertEqual(x.name, 'kilo') self.assertEqual(x.aliases, ()) self.assertEqual(x.symbol,", "= Definition.from_string('faraday = 96485.3399 * coulomb') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter,", "9/5) self.assertEqual(x.converter.offset, 255.372222) self.assertEqual(x.reference, UnitsContainer(kelvin=1)) x = Definition.from_string('turn = 6.28", "def test_dimension_definition(self): x = DimensionDefinition('[time]', '', (), converter='') self.assertTrue(x.is_base) self.assertEqual(x.name,", "self.assertEqual(x.name, 'kilo') self.assertEqual(x.aliases, ()) self.assertEqual(x.symbol, 'k') self.assertEqual(x.converter.to_reference(1000), 1) self.assertEqual(x.converter.from_reference(.001), 1)", "# -*- coding: utf-8 -*- from __future__ import division, unicode_literals,", "1) self.assertEqual(x.converter.from_reference(0.001), 1) self.assertEqual(str(x), 'm') x = Definition.from_string('kilo- = 1e-3", "1, '[time]': -1})) def test_alias_definition(self): x = Definition.from_string(\"@alias meter =", "self.assertEqual(x.symbol, 'turn') self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 6.28) self.assertEqual(x.reference, UnitsContainer(radian=1)) def", "UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 96485.3399) self.assertEqual(x.reference, UnitsContainer(coulomb=1)) x =", "= Definition.from_string('meter = [length]') self.assertIsInstance(x, UnitDefinition) self.assertTrue(x.is_base) self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))", "UnitsContainer(ampere=1, second=1)) x = 
Definition.from_string('faraday = 96485.3399 * coulomb') self.assertIsInstance(x,", "self.assertEqual(x.converter.from_reference(.001), 1) x = Definition.from_string('kilo- = 1e-3 = k- =", "self.assertTrue(x.is_base) self.assertEqual(x.name, '[time]') x = Definition.from_string('[speed] = [length]/[time]') self.assertIsInstance(x, DimensionDefinition)", "Definition.from_string(\"@alias meter = metro = metr\") self.assertIsInstance(x, AliasDefinition) self.assertEqual(x.name, \"meter\")", "('revolution', 'cycle')) self.assertEqual(x.symbol, 'turn') self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale, 6.28) self.assertEqual(x.reference,", "self.assertEqual(x.converter.scale, 1) self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1)) x = Definition.from_string('faraday = 96485.3399", "-*- from __future__ import division, unicode_literals, print_function, absolute_import from pint.util", "x = Definition.from_string('meter = [length]') self.assertIsInstance(x, UnitDefinition) self.assertTrue(x.is_base) self.assertEqual(x.reference, UnitsContainer({'[length]':", "= 96485.3399 * coulomb') self.assertIsInstance(x, UnitDefinition) self.assertFalse(x.is_base) self.assertIsInstance(x.converter, ScaleConverter) self.assertEqual(x.converter.scale," ]
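

# Usage sketch (illustrative, not part of pint's test suite): the same
# "name = scale * reference = symbol = alias" definition strings exercised
# above can also be registered on a live UnitRegistry at runtime. The
# function name and the 'half_turn' unit below are hypothetical examples
# introduced here for illustration.
def demo_definition_strings():
    import pint

    ureg = pint.UnitRegistry()
    # same grammar as 'turn = 6.28 * radian = _ = revolution' in the tests
    ureg.define('half_turn = 3.14 * radian = htr = half_revolution')
    q = 2 * ureg.half_turn
    # conversion applies the scale factor from the definition string
    assert abs(q.to(ureg.radian).magnitude - 6.28) < 1e-9
    return q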
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
#  http://backreference.org/2010/11/17/dnssec-verification-with-dig/
#  https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py

import dns
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA

from .logging import get_logger


_logger = get_logger(__name__)


# hard-coded trust anchors (root KSKs)
trust_anchors = [
    # KSK-2017:
    dns.rrset.from_text('.', 1, 'IN', 'DNSKEY', '<KEY>'),
    # KSK-2010:
    dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]


def _check_query(ns, sub, _type, keys):
    q = dns.message.make_query(sub, _type, want_dnssec=True)
    response = dns.query.tcp(q, ns, timeout=5)
    assert response.rcode() == 0, 'No answer'
    answer = response.answer
    assert len(answer) != 0, ('No DNS record found', sub, _type)
    assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
    if answer[0].rdtype == dns.rdatatype.RRSIG:
        rrsig, rrset = answer
    elif answer[1].rdtype == dns.rdatatype.RRSIG:
        rrset, rrsig = answer
    else:
        raise Exception('No signature set in record')
    if keys is None:
        keys = {dns.name.from_text(sub): rrset}
    dns.dnssec.validate(rrset, rrsig, keys)
    return rrset


def _get_and_validate(ns, url, _type):
    # get trusted root key
    root_rrset = None
    for dnskey_rr in trust_anchors:
        try:
            # Check if there is a valid signature for the root dnskey
            root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
            break
        except dns.dnssec.ValidationFailure:
            # It's OK as long as one key validates
            continue
    if not root_rrset:
        raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
    keys = {dns.name.root: root_rrset}
    # top-down verification
    parts = url.split('.')
    for i in range(len(parts), 0, -1):
        sub = '.'.join(parts[i-1:])
        name = dns.name.from_text(sub)
        # If server is authoritative, don't fetch DNSKEY
        query = dns.message.make_query(sub, dns.rdatatype.NS)
        response = dns.query.udp(query, ns, 3)
        assert response.rcode() == dns.rcode.NOERROR, "query error"
        rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
        rr = rrset[0]
        if rr.rdtype == dns.rdatatype.SOA:
            continue
        # get DNSKEY (self-signed)
        rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None)
        # get DS (signed by parent)
        ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys)
        # verify that a signed DS validates DNSKEY
        for ds in ds_rrset:
            for dnskey in rrset:
                htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
                good_ds = dns.dnssec.make_ds(name, dnskey, htype)
                if ds == good_ds:
                    break
            else:
                continue
            break
        else:
            raise Exception("DS does not match DNSKEY")
        # set key for next iteration
        keys = {name: rrset}
    # get TXT record (signed by zone)
    rrset = _check_query(ns, url, _type, keys)
    return rrset


def query(url, rtype):
    # 8.8.8.8 is Google's public DNS server
    nameservers = ['8.8.8.8']
    ns = nameservers[0]
    try:
        out = _get_and_validate(ns, url, rtype)
        validated = True
    except Exception as e:
        _logger.info(f"DNSSEC error: {repr(e)}")
        out = dns.resolver.resolve(url, rtype)
        validated = False
    return out, validated
# Check DNSSEC trust", "Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='), ] def _check_query(ns, sub, _type, keys): q =", "= _get_and_validate(ns, url, rtype) validated = True except Exception as", "to deal in the Software without restriction, # including without", "ns, timeout=5) assert response.rcode() == 0, 'No answer' answer =", "response.rcode() == 0, 'No answer' answer = response.answer assert len(answer)", "of the trust anchors found in DNS') keys = {dns.name.root:", "keys = {dns.name.from_text(sub):rrset} dns.dnssec.validate(rrset, rrsig, keys) return rrset def _get_and_validate(ns,", "to the following conditions: # # The above copyright notice", "== dns.rcode.NOERROR, \"query error\" rrset = response.authority[0] if len(response.authority) >", "'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq", "KSK-2010: dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX", "DNS record found', sub, _type) assert len(answer) != 1, ('No", "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='), ] def _check_query(ns,", "get trusted root key root_rrset = None for dnskey_rr in", "# It's OK as long as one key validates continue", "set in record') if keys is None: keys = {dns.name.from_text(sub):rrset}", "= 'SHA256' if ds.digest_type == 2 else 'SHA1' good_ds =", "0, -1): sub = '.'.join(parts[i-1:]) name = dns.name.from_text(sub) # If", "import dns.dnssec import dns.message import dns.resolver import dns.rdatatype import dns.rdtypes.ANY.NS", "'.'.join(parts[i-1:]) name = dns.name.from_text(sub) # If server is authoritative, don't", "nameservers = ['8.8.8.8'] ns = nameservers[0] try: out = _get_and_validate(ns,", "USE OR OTHER DEALINGS IN THE # SOFTWARE. # Check", "FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT", "try: out = _get_and_validate(ns, url, rtype) validated = True except", "except Exception as e: _logger.info(f\"DNSSEC error: {repr(e)}\") out = dns.resolver.resolve(url,", "KSK-2017: dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '<KEY>), # KSK-2010: dns.rrset.from_text('.',", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE", "the Software is furnished to do so, # subject to", "# Todo: verify expiration dates # # Based on #", "X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='), ] def _check_query(ns, sub, _type, keys):", "PARTICULAR PURPOSE AND # NONINFRINGEMENT. 
IN NO EVENT SHALL THE", "granted, free of charge, to any person # obtaining a", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE", "publish, distribute, sublicense, and/or sell copies of the Software, #", "copies of the Software, # and to permit persons to", "to any person # obtaining a copy of this software", "in record') if keys is None: keys = {dns.name.from_text(sub):rrset} dns.dnssec.validate(rrset,", "dnskey root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr}) break except", "rrset = _check_query(ns, url, _type, keys) return rrset def query(url,", "# SOFTWARE. # Check DNSSEC trust chain. # Todo: verify", "by zone) rrset = _check_query(ns, url, _type, keys) return rrset", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF", "dates # # Based on # http://backreference.org/2010/11/17/dnssec-verification-with-dig/ # https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py import", "rrset, rrsig = answer else: raise Exception('No signature set in", "None: keys = {dns.name.from_text(sub):rrset} dns.dnssec.validate(rrset, rrsig, keys) return rrset def", "dns.rdatatype.SOA: continue # get DNSKEY (self-signed) rrset = _check_query(ns, sub,", "keys) # verify that a signed DS validates DNSKEY for", "dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '<KEY>), # KSK-2010: dns.rrset.from_text('.', 15202,", "= dns.query.tcp(q, ns, timeout=5) assert response.rcode() == 0, 'No answer'", "DS validates DNSKEY for ds in ds_rrset: for dnskey in", "import dns.rdtypes.ANY.TXT import dns.rdtypes.IN.A import dns.rdtypes.IN.AAAA from .logging import get_logger", "files # (the \"Software\"), to deal in the Software without", "as e: _logger.info(f\"DNSSEC error: {repr(e)}\") out = dns.resolver.resolve(url, rtype) validated", "import dns.rdtypes.ANY.RRSIG import dns.rdtypes.ANY.SOA import dns.rdtypes.ANY.TXT import dns.rdtypes.IN.A import dns.rdtypes.IN.AAAA", "= get_logger(__name__) # hard-coded trust anchors (root KSKs) trust_anchors =", "Google's public DNS server nameservers = ['8.8.8.8'] ns = nameservers[0]", "rr = rrset[0] if rr.rdtype == dns.rdatatype.SOA: continue # get", "of charge, to any person # obtaining a copy of", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "is hereby granted, free of charge, to any person #", "response.answer assert len(answer) != 0, ('No DNS record found', sub,", "if rr.rdtype == dns.rdatatype.SOA: continue # get DNSKEY (self-signed) rrset", "dns.message.make_query(sub, dns.rdatatype.NS) response = dns.query.udp(query, ns, 3) assert response.rcode() ==", "dns.rdtypes.IN.A import dns.rdtypes.IN.AAAA from .logging import get_logger _logger = get_logger(__name__)", "OF OR IN # CONNECTION WITH THE SOFTWARE OR THE", "raise Exception(\"DS does not match DNSKEY\") # set key for", "software and associated documentation files # (the \"Software\"), to deal", "OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "Exception('No signature set in record') if keys is None: keys", "Permission is hereby granted, free of charge, to any person", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W<KEY>S Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='), ] def", "LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR", "The above copyright notice and this permission notice shall 
be", "import dns.message import dns.resolver import dns.rdatatype import dns.rdtypes.ANY.NS import dns.rdtypes.ANY.CNAME", "by parent) ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys) # verify", "for next iteration keys = {name: rrset} # get TXT", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY,", "It's OK as long as one key validates continue if", "# http://backreference.org/2010/11/17/dnssec-verification-with-dig/ # https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py import dns import dns.name import dns.query", "for the root dnskey root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root:", "_check_query(ns, sub, _type, keys): q = dns.message.make_query(sub, _type, want_dnssec=True) response", "# (the \"Software\"), to deal in the Software without restriction,", "HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "trust anchors found in DNS') keys = {dns.name.root: root_rrset} #", "{name: rrset} # get TXT record (signed by zone) rrset", ".logging import get_logger _logger = get_logger(__name__) # hard-coded trust anchors", "== dns.rdatatype.RRSIG: rrsig, rrset = answer elif answer[1].rdtype == dns.rdatatype.RRSIG:", "# Check DNSSEC trust chain. # Todo: verify expiration dates", "] def _check_query(ns, sub, _type, keys): q = dns.message.make_query(sub, _type,", "is None: keys = {dns.name.from_text(sub):rrset} dns.dnssec.validate(rrset, rrsig, keys) return rrset", "root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr}) break except dns.dnssec.ValidationFailure:", "rrset: htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'", "limitation the rights to use, copy, modify, merge, # publish,", "THE # SOFTWARE. # Check DNSSEC trust chain. # Todo:", "'', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr}) break except dns.dnssec.ValidationFailure: # It's OK", "dns.resolver import dns.rdatatype import dns.rdtypes.ANY.NS import dns.rdtypes.ANY.CNAME import dns.rdtypes.ANY.DLV import", "for i in range(len(parts), 0, -1): sub = '.'.join(parts[i-1:]) name", "{dns.name.root: dnskey_rr}) break except dns.dnssec.ValidationFailure: # It's OK as long", "# publish, distribute, sublicense, and/or sell copies of the Software," ]
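# A minimal usage sketch for query() above (not part of the original
# module): it assumes network access and the dnspython dependency, and
# 'example.com' plus the 'TXT' rtype are illustrative placeholders only.
# query() falls back to an unvalidated dns.resolver.resolve() answer when
# the DNSSEC chain cannot be verified, so callers must check the flag.
if __name__ == '__main__':
    out, validated = query('example.com', 'TXT')
    for rr in out:
        print(rr.to_text(), '(validated)' if validated else '(unvalidated)')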
##########################################################################
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/


from dxgi import *
from d3dcommon import *


ID3D11DeviceChild = Interface("ID3D11DeviceChild", IUnknown)
ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild)
ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild)
ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild)
ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild)
ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource)
ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource)
ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource)
ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource)
ID3D11View = Interface("ID3D11View", ID3D11DeviceChild)
ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View)
ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View)
ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View)
ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View)
ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild)
ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild)
ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild)
ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild)
ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild)
ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild)
ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild)
ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild)
ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild)
ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous)
ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query)
ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous)
ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild)
ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild)
ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild)
ID3D11Device = Interface("ID3D11Device", IUnknown)
ID3D11DeviceContext = Interface("ID3D11DeviceContext", ID3D11DeviceChild)

D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
    "D3D11_INPUT_PER_VERTEX_DATA",
    "D3D11_INPUT_PER_INSTANCE_DATA",
])

D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [
    "D3D11_APPEND_ALIGNED_ELEMENT",
])

D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (DXGI_FORMAT, "Format"),
    (UINT, "InputSlot"),
    (UINT, "AlignedByteOffset"),
    (D3D11_INPUT_CLASSIFICATION, "InputSlotClass"),
    (UINT, "InstanceDataStepRate"),
])

D3D11_USAGE = Enum("D3D11_USAGE", [
    "D3D11_USAGE_DEFAULT",
    "D3D11_USAGE_IMMUTABLE",
    "D3D11_USAGE_DYNAMIC",
    "D3D11_USAGE_STAGING",
])

D3D11_BIND_FLAG = Flags(UINT, [
    "D3D11_BIND_VERTEX_BUFFER",
    "D3D11_BIND_INDEX_BUFFER",
    "D3D11_BIND_CONSTANT_BUFFER",
    "D3D11_BIND_SHADER_RESOURCE",
    "D3D11_BIND_STREAM_OUTPUT",
    "D3D11_BIND_RENDER_TARGET",
    "D3D11_BIND_DEPTH_STENCIL",
    "D3D11_BIND_UNORDERED_ACCESS",
])

D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
    "D3D11_CPU_ACCESS_WRITE",
    "D3D11_CPU_ACCESS_READ",
])

D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
    "D3D11_RESOURCE_MISC_GENERATE_MIPS",
    "D3D11_RESOURCE_MISC_SHARED",
    "D3D11_RESOURCE_MISC_TEXTURECUBE",
    "D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
    "D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
    "D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
    "D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
    "D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
    "D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])

D3D11_MAP = Enum("D3D11_MAP", [
    "D3D11_MAP_READ",
    "D3D11_MAP_WRITE",
    "D3D11_MAP_READ_WRITE",
    "D3D11_MAP_WRITE_DISCARD",
    "D3D11_MAP_WRITE_NO_OVERWRITE",
])

D3D11_MAP_FLAG = Flags(UINT, [
    "D3D11_MAP_FLAG_DO_NOT_WAIT",
])

D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
    (OpaquePointer(Const(Void)), "pSysMem"),
    (UINT, "SysMemPitch"),
    (UINT, "SysMemSlicePitch"),
])

D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
    (OpaquePointer(Void), "pData"),
    (UINT, "RowPitch"),
    (UINT, "DepthPitch"),
])

D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
    (UINT, "ByteWidth"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
    (UINT, "StructureByteStride"),
])

D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (DXGI_SAMPLE_DESC, "SampleDesc"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11DeviceChild.methods += [
    StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice")]),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
    StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
]

ID3D11Texture2D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]

ID3D11DeviceContext.methods += [
    StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
    StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
    StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
    StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
    StdMethod(Void, "Flush", []),
]

D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [
    "D3D11_CREATE_DEVICE_SINGLETHREADED",
    "D3D11_CREATE_DEVICE_DEBUG",
    "D3D11_CREATE_DEVICE_SWITCH_TO_REF",
    "D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
    "D3D11_CREATE_DEVICE_BGRA_SUPPORT",
])

d3d11 = Module("d3d11")
d3d11.addFunctions([
    StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
    StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
])
Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]), StdMethod(HRESULT,", "ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild) ID3D11Buffer = Interface(\"ID3D11Buffer\", ID3D11Resource) ID3D11Texture1D =", "\"ClearUnorderedAccessViewUint\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(UINT), 4), \"Values\")]), StdMethod(Void, \"ClearUnorderedAccessViewFloat\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"),", "[(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\")]),", "[Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\"), Out(Pointer(BOOL), \"pPredicateValue\")]), StdMethod(Void, \"GSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"),", "ID3D11Asynchronous = Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query = Interface(\"ID3D11Query\", ID3D11Asynchronous) ID3D11Predicate =", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE1D_DESC), \"pDesc\")]), ] D3D11_TEXTURE2D_DESC = Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT,", "D3D11_COMPARISON_FUNC = Enum(\"D3D11_COMPARISON_FUNC\", [ \"D3D11_COMPARISON_NEVER\", \"D3D11_COMPARISON_LESS\", \"D3D11_COMPARISON_EQUAL\", \"D3D11_COMPARISON_LESS_EQUAL\", \"D3D11_COMPARISON_GREATER\", \"D3D11_COMPARISON_NOT_EQUAL\",", "\"MipSlice\"), ]) D3D11_TEX2D_ARRAY_UAV = Struct(\"D3D11_TEX2D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"),", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"VSSetSamplers\", [(UINT, \"StartSlot\"),", "(BOOL, \"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"), ]) D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic(\"D3D11_FEATURE\", \"Feature\", [ (\"D3D11_FEATURE_THREADING\",", "StdMethod(Void, \"RSGetViewports\", [Out(Pointer(UINT), \"pNumViewports\"), Out(Array(D3D11_VIEWPORT, \"*pNumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSGetScissorRects\", [Out(Pointer(UINT),", "]) D3D11_TEX1D_ARRAY_UAV = Struct(\"D3D11_TEX1D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT,", "[ \"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_SO_BUFFER\", \"D3D11_FORMAT_SUPPORT_TEXTURE1D\", \"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\",", "\"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX2D_ARRAY_SRV = Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"),", "\"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\", \"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH\",", "\"GetImmediateContext\", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdMethod(HRESULT, \"SetExceptionMode\", [(D3D11_RAISE_FLAG, \"RaiseFlags\")]), StdMethod(UINT, \"GetExceptionMode\", []),", "D3D11_INPUT_CLASSIFICATION = Enum(\"D3D11_INPUT_CLASSIFICATION\", [ 
\"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\", ]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT,", "Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE\",", "]) D3D11_TEXCUBE_ARRAY_SRV = Struct(\"D3D11_TEXCUBE_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT,", "\"DSGetShader\", [Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"DSGetSamplers\",", "\"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"),", "]) D3D11_TEX2DMS_DSV = Struct(\"D3D11_TEX2DMS_DSV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_DSV =", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"),", "(UINT64, \"GSInvocations\"), (UINT64, \"GSPrimitives\"), (UINT64, \"CInvocations\"), (UINT64, \"CPrimitives\"), (UINT64, \"PSInvocations\"),", "]) D3D11_TEX2DMS_ARRAY_DSV = Struct(\"D3D11_TEX2DMS_ARRAY_DSV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "]) ID3D11ShaderResourceView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_RTV", "internal=True), ]) d3d11.addInterfaces([ IDXGIAdapter1, IDXGIDevice1, IDXGIResource, ID3D11Debug, ID3D11InfoQueue, ID3D11SwitchToRef, ])", "\"DSInvocations\"), (UINT64, \"CSInvocations\"), ]) D3D11_QUERY_DATA_SO_STATISTICS = Struct(\"D3D11_QUERY_DATA_SO_STATISTICS\", [ (UINT64, \"NumPrimitivesWritten\"),", "\"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_ANISOTROPIC\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR\",", "[ (BOOL, \"AlphaToCoverageEnable\"), (BOOL, \"IndependentBlendEnable\"), (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), \"RenderTarget\"), ]) ID3D11BlendState.methods", "[ \"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\", ]) D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [ \"D3D11_RESOURCE_MISC_GENERATE_MIPS\", \"D3D11_RESOURCE_MISC_SHARED\",", "(UINT, \"RowPitch\"), (UINT, \"DepthPitch\"), ]) ID3D11Resource.methods += [ StdMethod(Void, \"GetType\",", "\"Texture2DArray\"), (D3D11_TEX3D_UAV, \"Texture3D\"), ]), None), ]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void,", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]), StdMethod(Void, \"GetInstanceName\", [Out(LPSTR, \"pInstanceName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]),", "[(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(FLOAT), 4), \"Values\")]), StdMethod(Void, 
\"ClearDepthStencilView\", [(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (D3D11_CLEAR_FLAG,", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SAMPLER_DESC), \"pDesc\")]), ] D3D11_FORMAT_SUPPORT = Flags(UINT, [ \"D3D11_FORMAT_SUPPORT_BUFFER\",", "\"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"),", "\"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\", [ \"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\", \"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\",", "\"ppQuery\")]), StdMethod(HRESULT, \"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"), Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\")]), StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)),", "\"Dispatch\", [(UINT, \"ThreadGroupCountX\"), (UINT, \"ThreadGroupCountY\"), (UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\", [(ObjPointer(ID3D11Buffer),", "Out(LPSTR, \"szName\"), Out(Pointer(UINT), \"pNameLength\"), Out(LPSTR, \"szUnits\"), Out(Pointer(UINT), \"pUnitsLength\"), Out(LPSTR, \"szDescription\"),", "\"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ]) D3D11_FILTER_TYPE", "\"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture1D)), \"ppTexture1D\")]), StdMethod(HRESULT, \"CreateTexture2D\", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture2D)),", "\"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"HSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"),", "\"InFormat\"), (D3D11_FORMAT_SUPPORT2, \"OutFormatSupport2\"), ]) D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct(\"D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS\", [ (BOOL, \"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"),", "\"Texture2DMSArray\"), ]), None), ]) ID3D11DepthStencilView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC),", "\"Texture1DArray\"), (D3D11_TEX2D_RTV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_RTV, \"Texture2DArray\"), (D3D11_TEX2DMS_RTV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_RTV, \"Texture2DMSArray\"), (D3D11_TEX3D_RTV,", "\"Values\")]), StdMethod(Void, \"ClearDepthStencilView\", [(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (D3D11_CLEAR_FLAG, \"ClearFlags\"), (FLOAT, \"Depth\"), (UINT8,", "\"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL, \"DoublePrecisionFloatShaderOps\"),", "Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query = Interface(\"ID3D11Query\", ID3D11Asynchronous) ID3D11Predicate = Interface(\"ID3D11Predicate\", ID3D11Query)", "= Struct(\"D3D11_VIEWPORT\", [ (FLOAT, \"TopLeftX\"), (FLOAT, \"TopLeftY\"), (FLOAT, \"Width\"), (FLOAT,", "D3D11_RTV_DIMENSION = Enum(\"D3D11_RTV_DIMENSION\", [ \"D3D11_RTV_DIMENSION_UNKNOWN\", 
\"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\",", "\"pDstBox\"), (OpaquePointer(Const(Void)), \"pSrcData\"), (UINT, \"SrcRowPitch\"), (UINT, \"SrcDepthPitch\")]), StdMethod(Void, \"CopyStructureCount\", [(ObjPointer(ID3D11Buffer),", "\"D3D11_BLEND_INV_SRC1_COLOR\", \"D3D11_BLEND_SRC1_ALPHA\", \"D3D11_BLEND_INV_SRC1_ALPHA\", ]) D3D11_BLEND_OP = Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\",", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "(Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture3D)), \"ppTexture3D\")]), StdMethod(HRESULT, \"CreateShaderResourceView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"),", "\"pDepthStencilView\")]), StdMethod(Void, \"OMSetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"),", "= Struct(\"D3D11_TEX1D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_RTV = Struct(\"D3D11_TEX1D_ARRAY_RTV\", [", "Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"), (UINT64, \"IAPrimitives\"), (UINT64, \"VSInvocations\"), (UINT64, \"GSInvocations\"),", "]) D3D11_TEXTURE_ADDRESS_MODE = Enum(\"D3D11_TEXTURE_ADDRESS_MODE\", [ \"D3D11_TEXTURE_ADDRESS_WRAP\", \"D3D11_TEXTURE_ADDRESS_MIRROR\", \"D3D11_TEXTURE_ADDRESS_CLAMP\", \"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\",", "\"BaseSampler\"), (BOOL, \"Created\"), ]) ID3D11ClassInstance.methods += [ StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)),", "# Permission is hereby granted, free of charge, to any", "(UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"pDesc\")]), ] D3D11_TEXTURECUBE_FACE = Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\",", "\"D3D11_DEPTH_WRITE_MASK_ZERO\", \"D3D11_DEPTH_WRITE_MASK_ALL\", ]) D3D11_STENCIL_OP = Enum(\"D3D11_STENCIL_OP\", [ \"D3D11_STENCIL_OP_KEEP\", \"D3D11_STENCIL_OP_ZERO\", \"D3D11_STENCIL_OP_REPLACE\",", "(D3D11_FILL_MODE, \"FillMode\"), (D3D11_CULL_MODE, \"CullMode\"), (BOOL, \"FrontCounterClockwise\"), (INT, \"DepthBias\"), (FLOAT, \"DepthBiasClamp\"),", "\"CreateShaderResourceView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), \"ppSRView\")]), StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource),", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE3D_DESC), \"pDesc\")]), ] D3D11_TEXTURECUBE_FACE = Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\",", "\"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"HSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)),", "\"DrawIndexed\", [(UINT, \"IndexCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\")]), StdMethod(Void, \"Draw\", 
[(UINT,", "(SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\")]), StdMethod(HRESULT, \"CreateGeometryShader\", [(Blob(Const(Void), \"BytecodeLength\"),", "Copyright 2012 <NAME> # All Rights Reserved. # # Permission", "Interface(\"ID3D11Counter\", ID3D11Asynchronous) ID3D11ClassInstance = Interface(\"ID3D11ClassInstance\", ID3D11DeviceChild) ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild)", "\"NumBuffers\"), \"ppVertexBuffers\"), (Pointer(Const(UINT)), \"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"IASetIndexBuffer\", [(ObjPointer(ID3D11Buffer), \"pIndexBuffer\"),", "[(HANDLE, \"hResource\"), (REFIID, \"ReturnedInterface\"), Out(Pointer(ObjPointer(Void)), \"ppResource\")]), StdMethod(HRESULT, \"CheckFormatSupport\", [(DXGI_FORMAT, \"Format\"),", "\"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ]) ID3D11View.methods += [ StdMethod(Void, \"GetResource\", [Out(Pointer(ObjPointer(ID3D11Resource)), \"ppResource\")]),", "\"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\")]), StdMethod(HRESULT, \"CreateGeometryShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"),", "Interface(\"ID3D11Buffer\", ID3D11Resource) ID3D11Texture1D = Interface(\"ID3D11Texture1D\", ID3D11Resource) ID3D11Texture2D = Interface(\"ID3D11Texture2D\", ID3D11Resource)", "8), \"RenderTarget\"), ]) ID3D11BlendState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]),", "\"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT, \"CheckCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)),", "Struct(\"D3D11_SUBRESOURCE_DATA\", [ (OpaquePointer(Const(Void)), \"pSysMem\"), (UINT, \"SysMemPitch\"), (UINT, \"SysMemSlicePitch\"), ]) D3D11_MAPPED_SUBRESOURCE", "[ (D3D11_TEX1D_DSV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"), (D3D11_TEX2DMS_DSV,", "\"ppTexture1D\")]), StdMethod(HRESULT, \"CreateTexture2D\", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture2D)), \"ppTexture2D\")]), StdMethod(HRESULT,", "(BOOL, \"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"),", "ID3D11DeviceChild) ID3D11GeometryShader = Interface(\"ID3D11GeometryShader\", ID3D11DeviceChild) ID3D11PixelShader = Interface(\"ID3D11PixelShader\", ID3D11DeviceChild) ID3D11ComputeShader", "(SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]), StdMethod(HRESULT, \"CreateDomainShader\", [(Blob(Const(Void), \"BytecodeLength\"),", "\"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV, \"TextureCubeArray\"), (D3D11_BUFFEREX_SRV, \"BufferEx\"), ]), None), ]) ID3D11ShaderResourceView.methods +=", "(D3D11_TEXCUBE_SRV, \"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV, \"TextureCubeArray\"), (D3D11_BUFFEREX_SRV, \"BufferEx\"), ]), None), ]) ID3D11ShaderResourceView.methods", "= Struct(\"D3D11_TEX1D_UAV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_UAV = 
Struct(\"D3D11_TEX1D_ARRAY_UAV\", [", "\"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT),", "Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [ \"D3D11_STANDARD_MULTISAMPLE_PATTERN\", \"D3D11_CENTER_MULTISAMPLE_PATTERN\", ]) D3D11_DEVICE_CONTEXT_TYPE = Enum(\"D3D11_DEVICE_CONTEXT_TYPE\", [ \"D3D11_DEVICE_CONTEXT_IMMEDIATE\",", "StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\", [(HANDLE, \"hResource\"),", "StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"), (D3D11_CREATE_DEVICE_FLAG, \"Flags\"),", "Interface(\"ID3D11PixelShader\", ID3D11DeviceChild) ID3D11ComputeShader = Interface(\"ID3D11ComputeShader\", ID3D11DeviceChild) ID3D11InputLayout = Interface(\"ID3D11InputLayout\", ID3D11DeviceChild)", "(Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DrawIndexed\", [(UINT, \"IndexCount\"), (UINT,", "StdMethod(Void, \"GSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "\"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\",", "(UINT, \"SrcDepthPitch\")]), StdMethod(Void, \"CopyStructureCount\", [(ObjPointer(ID3D11Buffer), \"pDstBuffer\"), (UINT, \"DstAlignedByteOffset\"), (ObjPointer(ID3D11UnorderedAccessView), \"pSrcView\")]),", "(SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreateGeometryShaderWithStreamOutput\", [(Blob(Const(Void), \"BytecodeLength\"),", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSGetShader\", [Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT),", "[(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"), (UINT, \"NumEntries\"),", "[(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"),", "\"RenderTarget\"), ]) ID3D11BlendState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ]", "[ \"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", 
\"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\", \"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\",", "]) D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT2\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT2, \"OutFormatSupport2\"), ])", "]) D3D11_MAP = Enum(\"D3D11_MAP\", [ \"D3D11_MAP_READ\", \"D3D11_MAP_WRITE\", \"D3D11_MAP_READ_WRITE\", \"D3D11_MAP_WRITE_DISCARD\", \"D3D11_MAP_WRITE_NO_OVERWRITE\",", "(D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None), ]) ID3D11DepthStencilView.methods += [", "\"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH\",", "= Struct(\"D3D11_FEATURE_DATA_THREADING\", [ (BOOL, \"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES =", "[ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "\"ppVertexShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetConstantBuffers\", [(UINT, \"StartSlot\"),", "ID3D11Query.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT =", "\"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"),", "StdMethod(HRESULT, \"CheckFeatureSupport\", [(D3D11_FEATURE, \"Feature\"), Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT, \"FeatureSupportDataSize\")]), StdMethod(HRESULT, \"GetPrivateData\",", "(UINT, \"NumCubes\"), ]) D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"), ])", "\"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER\", \"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST\", \"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\",", "StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)),", "\"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"),", "D3D11_TEX3D_SRV = Struct(\"D3D11_TEX3D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_SRV", "(D3D11_DEPTH_WRITE_MASK, \"DepthWriteMask\"), (D3D11_COMPARISON_FUNC, \"DepthFunc\"), (BOOL, 
\"StencilEnable\"), (UINT8, \"StencilReadMask\"), (UINT8, \"StencilWriteMask\"),", "Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "Struct(\"D3D11_TEX2D_ARRAY_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2DMS_ARRAY_RTV", "\"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH\",", "[(LPCSTR, \"pClassInstanceName\"), (UINT, \"InstanceIndex\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), StdMethod(HRESULT, \"CreateClassInstance\", [(LPCSTR, \"pClassTypeName\"),", "\"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE3D\", ]) D3D11_UAV_DIMENSION", "]) D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"), (UINT64, \"IAPrimitives\"), (UINT64,", "\"GSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSSetShader\",", "\"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"),", "\"NumClassInstances\")]), StdMethod(Void, \"CSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]),", "]) D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER, \"LastDeviceDependentCounter\"), (UINT, \"NumSimultaneousCounters\"), (UINT8,", "(UINT, \"ByteWidth\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"),", "\"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG = Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ]) D3D11_RECT", "(UINT, \"NumEntries\"), (Array(Const(UINT), \"NumStrides\"), \"pBufferStrides\"), (UINT, \"NumStrides\"), (UINT, \"RasterizedStream\"), (ObjPointer(ID3D11ClassLinkage),", "StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "\"NumClassInstances\")]), StdMethod(Void, \"PSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]),", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_SRV = Struct(\"D3D11_TEX2D_SRV\", [ (UINT, \"MostDetailedMip\"),", "\"ConstantBufferOffset\"), (UINT, \"ConstantVectorOffset\"), (UINT, \"TextureOffset\"), (UINT, \"SamplerOffset\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), ]", "\"StartSlot\"), (UINT, \"NumBuffers\"), 
(Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]),", "\"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ]) D3D11_RECT = Alias(\"D3D11_RECT\", RECT) D3D11_BOX = Struct(\"D3D11_BOX\",", "\"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]), StdMethod(HRESULT, \"CreateDomainShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"),", "\"pMappedResource\")]), StdMethod(Void, \"Unmap\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\")]), StdMethod(Void, \"PSSetConstantBuffers\", [(UINT,", "\"Feature\"), Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT, \"FeatureSupportDataSize\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT),", "\"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_DSV = Struct(\"D3D11_TEX2DMS_ARRAY_DSV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "]) D3D11_TEX2D_ARRAY_SRV = Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT,", "\"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"), (FLOAT, \"MipLODBias\"), (UINT,", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "ID3D11DeviceChild) ID3D11ComputeShader = Interface(\"ID3D11ComputeShader\", ID3D11DeviceChild) ID3D11InputLayout = Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState", "\"D3D11_BIND_CONSTANT_BUFFER\", \"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT,", "[ (OpaquePointer(Const(Void)), \"pSysMem\"), (UINT, \"SysMemPitch\"), (UINT, \"SysMemSlicePitch\"), ]) D3D11_MAPPED_SUBRESOURCE =", "(UINT, \"SamplerOffset\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), ] ID3D11CommandList.methods += [ StdMethod(UINT, \"GetContextFlags\",", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [", "\"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2\", \"D3D11_QUERY_SO_STATISTICS_STREAM3\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3\", ]) D3D11_QUERY_MISC_FLAG = Flags(UINT, [ \"D3D11_QUERY_MISC_PREDICATEHINT\", ])", "\"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH\", ]) D3D11_CULL_MODE = Enum(\"D3D11_CULL_MODE\", [ \"D3D11_CULL_NONE\", \"D3D11_CULL_FRONT\", \"D3D11_CULL_BACK\",", "(BOOL, \"DepthEnable\"), (D3D11_DEPTH_WRITE_MASK, \"DepthWriteMask\"), (D3D11_COMPARISON_FUNC, \"DepthFunc\"), (BOOL, \"StencilEnable\"), (UINT8, \"StencilReadMask\"),", "]) ID3D11Query.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", "StdMethod(Void, \"CSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "(\"D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS\", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)), ], Blob(Void, \"FeatureSupportDataSize\"), False) ID3D11DeviceContext.methods += [ StdMethod(Void,", "Out(Array(D3D11_RECT, \"*pNumRects\"), \"pRects\")]), StdMethod(Void, \"HSGetShaderResources\", [(UINT, 
\"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView),", "= Enum(\"D3D11_BLEND\", [ \"D3D11_BLEND_ZERO\", \"D3D11_BLEND_ONE\", \"D3D11_BLEND_SRC_COLOR\", \"D3D11_BLEND_INV_SRC_COLOR\", \"D3D11_BLEND_SRC_ALPHA\", \"D3D11_BLEND_INV_SRC_ALPHA\", \"D3D11_BLEND_DEST_ALPHA\",", "\"ppShaderResourceViews\")]), StdMethod(Void, \"DSGetShader\", [Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]),", "= Interface(\"ID3D11PixelShader\", ID3D11DeviceChild) ID3D11ComputeShader = Interface(\"ID3D11ComputeShader\", ID3D11DeviceChild) ID3D11InputLayout = Interface(\"ID3D11InputLayout\",", "ID3D11ClassInstance = Interface(\"ID3D11ClassInstance\", ID3D11DeviceChild) ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList =", "(D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), (UINT, \"StructureByteStride\"), ]) ID3D11Buffer.methods += [ StdMethod(Void, \"GetDesc\",", "D3D11_COLOR_WRITE_ENABLE = Enum(\"D3D11_COLOR_WRITE_ENABLE\", [ \"D3D11_COLOR_WRITE_ENABLE_ALL\", \"D3D11_COLOR_WRITE_ENABLE_RED\", \"D3D11_COLOR_WRITE_ENABLE_GREEN\", \"D3D11_COLOR_WRITE_ENABLE_BLUE\", \"D3D11_COLOR_WRITE_ENABLE_ALPHA\", ])", "\"szName\"), Out(Pointer(UINT), \"pNameLength\"), Out(LPSTR, \"szUnits\"), Out(Pointer(UINT), \"pUnitsLength\"), Out(LPSTR, \"szDescription\"), Out(Pointer(UINT),", "StdMethod(HRESULT, \"SetPrivateData\", [(REFGUID, \"guid\"), (UINT, \"DataSize\"), (OpaqueBlob(Const(Void), \"DataSize\"), \"pData\")]), StdMethod(HRESULT,", "# Copyright 2012 <NAME> # All Rights Reserved. # #", "[ (LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (DXGI_FORMAT, \"Format\"), (UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET,", "\"GetDataSize\", []), ] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY", "[Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT, \"CheckCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pDesc\"), Out(Pointer(D3D11_COUNTER_TYPE), \"pType\"), Out(Pointer(UINT), \"pActiveCounters\"),", "\"D3D11_DSV_DIMENSION_TEXTURE2DMS\", \"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY\", ]) D3D11_RTV_DIMENSION = Enum(\"D3D11_RTV_DIMENSION\", [ \"D3D11_RTV_DIMENSION_UNKNOWN\", \"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\",", "[ StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]), StdMethod(Void,", "\"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\", ]) D3D11_PRIMITIVE = Enum(\"D3D11_PRIMITIVE\", [ \"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\",", "StdMethod(Void, \"UpdateSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (Pointer(Const(D3D11_BOX)), \"pDstBox\"), (OpaquePointer(Const(Void)), \"pSrcData\"),", "\"D3D11_SRV_DIMENSION_BUFFER\", \"D3D11_SRV_DIMENSION_TEXTURE1D\", \"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2D\", \"D3D11_SRV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2DMS\", \"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\",", "\"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_SRV = 
Struct(\"D3D11_TEX3D_SRV\", [", "\"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ]) D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\", [", "StdMethod(Void, \"OMSetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (UINT,", "\"Buffer\"), (D3D11_TEX1D_UAV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV, \"Texture2DArray\"), (D3D11_TEX3D_UAV,", "StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"),", "\"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_ANISOTROPIC\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT\",", "[]), StdMethod(Void, \"DrawIndexedInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer),", "D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV = Struct(\"D3D11_TEX2D_ARRAY_DSV\",", "= Enum(\"D3D11_UAV_DIMENSION\", [ \"D3D11_UAV_DIMENSION_UNKNOWN\", \"D3D11_UAV_DIMENSION_BUFFER\", \"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE3D\",", "D3D11_FILTER = Enum(\"D3D11_FILTER\", [ \"D3D11_FILTER_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\",", "\"VSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"Begin\",", "= Enum(\"D3D11_FILTER\", [ \"D3D11_FILTER_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\",", "[Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"IAGetPrimitiveTopology\", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY),", "\"pShaderBytecodeWithInputSignature\"), (SIZE_T, \"BytecodeLength\"), Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(HRESULT, \"CreateVertexShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"),", "StdMethod(Void, \"VSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), 
(Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [ \"D3D11_STANDARD_MULTISAMPLE_PATTERN\", \"D3D11_CENTER_MULTISAMPLE_PATTERN\", ]) D3D11_DEVICE_CONTEXT_TYPE = Enum(\"D3D11_DEVICE_CONTEXT_TYPE\",", "(UINT, \"ArraySize\"), ]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"), (UINT,", "[ \"D3D11_BUFFER_UAV_FLAG_RAW\", \"D3D11_BUFFER_UAV_FLAG_APPEND\", \"D3D11_BUFFER_UAV_FLAG_COUNTER\", ]) D3D11_BUFFER_UAV = Struct(\"D3D11_BUFFER_UAV\", [ (UINT,", "Out(Pointer(ObjPointer(ID3D11RenderTargetView)), \"ppRTView\")]), StdMethod(HRESULT, \"CreateDepthStencilView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]),", "ID3D11CommandList = Interface(\"ID3D11CommandList\", ID3D11DeviceChild) ID3D11Device = Interface(\"ID3D11Device\", IUnknown) D3D11_INPUT_CLASSIFICATION =", "\"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ \"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\", ]) D3D11_RESOURCE_MISC_FLAG", "StdMethod(Void, \"VSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void,", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE3D_DESC), \"pDesc\")]), ] D3D11_TEXTURECUBE_FACE = Enum(\"D3D11_TEXTURECUBE_FACE\", [", "= Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT2\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT2, \"OutFormatSupport2\"), ]) D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS =", "\"pVertexShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DrawIndexed\", [(UINT, \"IndexCount\"),", "(UINT64, \"VSInvocations\"), (UINT64, \"GSInvocations\"), (UINT64, \"GSPrimitives\"), (UINT64, \"CInvocations\"), (UINT64, \"CPrimitives\"),", "(UINT, \"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\"), (D3D11_MAP, \"MapType\"),", "\"CSSetShader\", [(ObjPointer(ID3D11ComputeShader), \"pComputeShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"CSSetSamplers\",", "(UINT, \"NumClassInstances\")]), StdMethod(Void, \"DSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"),", "D3D11_TEX2D_ARRAY_SRV = Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"),", "(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSGetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "[ \"D3D11_APPEND_ALIGNED_ELEMENT\", ]) D3D11_INPUT_ELEMENT_DESC = Struct(\"D3D11_INPUT_ELEMENT_DESC\", [ (LPCSTR, \"SemanticName\"), (UINT,", "\"ComponentCount\"), (BYTE, \"OutputSlot\"), ]) D3D11_VIEWPORT = Struct(\"D3D11_VIEWPORT\", [ (FLOAT, \"TopLeftX\"),", "\"D3D11_RESOURCE_DIMENSION_TEXTURE2D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE3D\", ]) D3D11_SRV_DIMENSION = Enum(\"D3D11_SRV_DIMENSION\", [ \"D3D11_SRV_DIMENSION_UNKNOWN\", \"D3D11_SRV_DIMENSION_BUFFER\", \"D3D11_SRV_DIMENSION_TEXTURE1D\",", "+= [ StdMethod(Void, \"GetDesc\", 
[Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ] D3D11_RASTERIZER_DESC = Struct(\"D3D11_RASTERIZER_DESC\",", "\"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE3D\", ]) D3D11_UAV_DIMENSION = Enum(\"D3D11_UAV_DIMENSION\", [ \"D3D11_UAV_DIMENSION_UNKNOWN\",", "]) D3D11_COUNTER_DESC = Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER, \"Counter\"), (UINT, \"MiscFlags\"), ])", "\"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer),", "\"szDescription\"), Out(Pointer(UINT), \"pDescriptionLength\")]), StdMethod(HRESULT, \"CheckFeatureSupport\", [(D3D11_FEATURE, \"Feature\"), Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT,", "\"pClassTypeName\"), (UINT, \"ConstantBufferOffset\"), (UINT, \"ConstantVectorOffset\"), (UINT, \"TextureOffset\"), (UINT, \"SamplerOffset\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)),", "set StdFunction(HRESULT, \"D3D11CoreRegisterLayers\", [LPCVOID, DWORD], internal=True), StdFunction(SIZE_T, \"D3D11CoreGetLayeredDeviceSize\", [LPCVOID, DWORD],", "\"D3D11_MAP_FLAG_DO_NOT_WAIT\", ]) D3D11_RAISE_FLAG = Flags(UINT, [ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG =", "\"CreateTexture3D\", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture3D)), \"ppTexture3D\")]), StdMethod(HRESULT, \"CreateShaderResourceView\", [(ObjPointer(ID3D11Resource),", "restriction, including without limitation the rights # to use, copy,", "Enum(\"D3D11_STENCIL_OP\", [ \"D3D11_STENCIL_OP_KEEP\", \"D3D11_STENCIL_OP_ZERO\", \"D3D11_STENCIL_OP_REPLACE\", \"D3D11_STENCIL_OP_INCR_SAT\", \"D3D11_STENCIL_OP_DECR_SAT\", \"D3D11_STENCIL_OP_INVERT\", \"D3D11_STENCIL_OP_INCR\", \"D3D11_STENCIL_OP_DECR\",", "[(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), StdMethod(D3D_FEATURE_LEVEL, \"GetFeatureLevel\", []), StdMethod(D3D11_CREATE_DEVICE_FLAG, \"GetCreationFlags\", []),", "StdMethod(Void, \"PSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView = Interface(\"ID3D11RenderTargetView\", ID3D11View) ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\", ID3D11View)", "\"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\", [ \"D3D11_DSV_DIMENSION_UNKNOWN\",", "Struct(\"D3D11_TEX2DMS_ARRAY_RTV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\",", "[(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)),", "ID3D11RenderTargetView = Interface(\"ID3D11RenderTargetView\", ID3D11View) ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView =", "Out(Pointer(D3D11_COUNTER_TYPE), \"pType\"), Out(Pointer(UINT), \"pActiveCounters\"), Out(LPSTR, \"szName\"), Out(Pointer(UINT), \"pNameLength\"), Out(LPSTR, \"szUnits\"),", 
"\"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH\", ]) D3D11_CULL_MODE = Enum(\"D3D11_CULL_MODE\", [", "Out(Array(FLOAT, 4), \"BlendFactor\"), Out(Pointer(UINT), \"pSampleMask\")]), StdMethod(Void, \"OMGetDepthStencilState\", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\"), Out(Pointer(UINT),", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSSetShader\", [(ObjPointer(ID3D11PixelShader),", "\"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"PSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"),", "D3D11_PRIMITIVE = Enum(\"D3D11_PRIMITIVE\", [ \"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\",", "\"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER\",", "[ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\", \"D3D11_BLEND_OP_MAX\", ]) D3D11_COLOR_WRITE_ENABLE = Enum(\"D3D11_COLOR_WRITE_ENABLE\",", "(UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"TopLeftY\"), (FLOAT, \"Width\"), (FLOAT, \"Height\"), (FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"), ])", "\"Begin\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(Void, \"End\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(HRESULT, \"GetData\", [(ObjPointer(ID3D11Asynchronous),", "\"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"DSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"),", "\"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DrawIndexed\", [(UINT, \"IndexCount\"), (UINT, \"StartIndexLocation\"),", "StdMethod(Void, \"GetResource\", [Out(Pointer(ObjPointer(ID3D11Resource)), \"ppResource\")]), ] D3D11_BUFFER_SRV = Struct(\"D3D11_BUFFER_SRV\", [ (Union(None,", "(UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMGetRenderTargets\", [(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11RenderTargetView),", "\"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"), (UINT64, \"CSInvocations\"), ]) D3D11_QUERY_DATA_SO_STATISTICS =", "\"IASetIndexBuffer\", [(ObjPointer(ID3D11Buffer), \"pIndexBuffer\"), (DXGI_FORMAT, \"Format\"), (UINT, \"Offset\")]), StdMethod(Void, \"DrawIndexedInstanced\", [(UINT,", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\", [(UINT,", "[ \"D3D11_MAP_FLAG_DO_NOT_WAIT\", ]) D3D11_RAISE_FLAG = Flags(UINT, [ 
\"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "StdMethod(HRESULT, \"CreateClassInstance\", [(LPCSTR, \"pClassTypeName\"), (UINT, \"ConstantBufferOffset\"), (UINT, \"ConstantVectorOffset\"), (UINT, \"TextureOffset\"),", "free of charge, to any person obtaining a copy #", "(Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_TEX1D_RTV = Struct(\"D3D11_TEX1D_RTV\",", "\"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ]) D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\", [ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\",", "]) ID3D11DeviceChild.methods += [ StdMethod(Void, \"GetDevice\", [Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\")]), StdMethod(HRESULT, \"GetPrivateData\",", "(UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"OMGetBlendState\", [Out(Pointer(ObjPointer(ID3D11BlendState)),", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSSetShader\", [(ObjPointer(ID3D11HullShader), \"pHullShader\"),", "# THE SOFTWARE. # ##########################################################################/ from dxgi import * from", "(UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_SRV", "Struct(\"D3D11_TEX1D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_DSV", "\"ppUnorderedAccessViews\")]), StdMethod(Void, \"OMGetBlendState\", [Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\"), Out(Array(FLOAT, 4), \"BlendFactor\"), Out(Pointer(UINT), \"pSampleMask\")]),", "(D3D11_BLEND, \"DestBlendAlpha\"), (D3D11_BLEND_OP, \"BlendOpAlpha\"), (UINT8, \"RenderTargetWriteMask\"), ]) D3D11_BLEND_DESC = Struct(\"D3D11_BLEND_DESC\",", "\"DepthClipEnable\"), (BOOL, \"ScissorEnable\"), (BOOL, \"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods +=", "(Array(Const(UINT), 4), \"Values\")]), StdMethod(Void, \"ClearUnorderedAccessViewFloat\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(FLOAT), 4), \"Values\")]),", "StdMethod(HRESULT, \"CreateRenderTargetView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), \"ppRTView\")]), StdMethod(HRESULT, \"CreateDepthStencilView\",", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"),", "\"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"), (SIZE_T,", "(INT, \"BaseVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"),", "(UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "\"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV = 
Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "\"ppPredicate\")]), StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT,", "\"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]),", "(UINT, \"NumSimultaneousCounters\"), (UINT8, \"NumDetectableParallelUnits\"), ]) ID3D11Counter.methods += [ StdMethod(Void, \"GetDesc\",", "\"D3D11_MAP_READ_WRITE\", \"D3D11_MAP_WRITE_DISCARD\", \"D3D11_MAP_WRITE_NO_OVERWRITE\", ]) D3D11_MAP_FLAG = Flags(UINT, [ \"D3D11_MAP_FLAG_DO_NOT_WAIT\", ])", "(UINT64, \"IAPrimitives\"), (UINT64, \"VSInvocations\"), (UINT64, \"GSInvocations\"), (UINT64, \"GSPrimitives\"), (UINT64, \"CInvocations\"),", "StdMethod(Void, \"HSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "(UINT, \"MipLevels\"), ]) D3D11_TEX2D_ARRAY_SRV = Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT,", "\"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY = Enum(\"D3D11_QUERY\", [ \"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\",", "Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"),", "[ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX1D_ARRAY_SRV = Struct(\"D3D11_TEX1D_ARRAY_SRV\", [", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)),", "StdMethod(HRESULT, \"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)), \"ppQuery\")]), StdMethod(HRESULT, \"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"),", "(D3D11_TEX3D_RTV, \"Texture3D\"), ]), None), ]) ID3D11RenderTargetView.methods += [ StdMethod(Void, \"GetDesc\",", "\"SemanticName\"), (UINT, \"SemanticIndex\"), (DXGI_FORMAT, \"Format\"), (UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"), (D3D11_INPUT_CLASSIFICATION,", "= Enum(\"D3D11_DSV_DIMENSION\", [ \"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\", \"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2D\", \"D3D11_DSV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2DMS\", \"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY\",", "(DXGI_FORMAT, \"Format\")]), StdMethod(Void, \"ExecuteCommandList\", [(ObjPointer(ID3D11CommandList), \"pCommandList\"), (BOOL, \"RestoreContextState\")]), StdMethod(Void, \"HSSetShaderResources\",", "(UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV = Struct(\"D3D11_TEX2D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT,", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "\"MinLOD\")]), StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT,", "D3D11_TEXTURE2D_DESC = Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"MipLevels\"),", "\"CreateTexture2D\", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), \"pDesc\"), 
(Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture2D)), \"ppTexture2D\")]), StdMethod(HRESULT, \"CreateTexture3D\", [(Pointer(Const(D3D11_TEXTURE3D_DESC)),", "\"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH\",", "]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ \"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\", ]) D3D11_RESOURCE_MISC_FLAG =", "\"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER\", \"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST\",", "(Array(Const(D3D11_RECT), \"NumRects\"), \"pRects\")]), StdMethod(Void, \"CopySubresourceRegion\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (UINT,", "None), (Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_TEX1D_RTV =", "StdMethod(Void, \"RSSetScissorRects\", [(UINT, \"NumRects\"), (Array(Const(D3D11_RECT), \"NumRects\"), \"pRects\")]), StdMethod(Void, \"CopySubresourceRegion\", [(ObjPointer(ID3D11Resource),", "\"NumPrimitivesWritten\"), (UINT64, \"PrimitivesStorageNeeded\"), ]) D3D11_COUNTER = Enum(\"D3D11_COUNTER\", [ \"D3D11_COUNTER_DEVICE_DEPENDENT_0\", ])", "\"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX: Undocumented functions, called", "(UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV = Struct(\"D3D11_TEX2D_ARRAY_RTV\", [ (UINT, \"MipSlice\"), (UINT,", "= Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query = Interface(\"ID3D11Query\", ID3D11Asynchronous) ID3D11Predicate = Interface(\"ID3D11Predicate\",", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "ID3D11DeviceChild.methods += [ StdMethod(Void, \"GetDevice\", [Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID,", "\"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_SRV, \"Texture2DMSArray\"), (D3D11_TEX3D_SRV, \"Texture3D\"), (D3D11_TEXCUBE_SRV, \"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV, \"TextureCubeArray\"), (D3D11_BUFFEREX_SRV,", "]) D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV =", "]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT,", "\"Format\")]), StdMethod(Void, \"ExecuteCommandList\", [(ObjPointer(ID3D11CommandList), \"pCommandList\"), (BOOL, \"RestoreContextState\")]), StdMethod(Void, \"HSSetShaderResources\", [(UINT,", "\"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2D\", \"D3D11_DSV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2DMS\", \"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY\", ]) D3D11_RTV_DIMENSION = Enum(\"D3D11_RTV_DIMENSION\", [", "\"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ 
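# The forward declarations above only pin down each interface's name and
# parent, mirroring how d3d11.h forward-declares its interfaces; the vtable
# entries are attached further down with `.methods +=`.  A minimal sketch of
# that two-step pattern on a made-up interface (illustrative only, not part
# of D3D11, and deliberately never registered with the module, so the code
# generators ignore it):
IExampleChild = Interface("IExampleChild", ID3D11DeviceChild)
IExampleChild.methods += [
    # A single value returned through an output pointer, as in GetDevice.
    StdMethod(Void, "GetExampleValue", [Out(Pointer(UINT), "pValue")]),
]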
\"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\", ])", "Alias(\"D3D11_RECT\", RECT) D3D11_BOX = Struct(\"D3D11_BOX\", [ (UINT, \"left\"), (UINT, \"top\"),", "[Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), \"pTopology\")]), StdMethod(Void, \"VSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"),", "\"Format\"), Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]), StdMethod(HRESULT, \"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, \"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT),", "\"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild) ID3D11BlendState = Interface(\"ID3D11BlendState\", ID3D11DeviceChild)", "\"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (DXGI_FORMAT,", "\"ppUAView\")]), StdMethod(HRESULT, \"CreateRenderTargetView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), \"ppRTView\")]), StdMethod(HRESULT,", "\"Texture2DArray\"), (D3D11_TEX2DMS_SRV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_SRV, \"Texture2DMSArray\"), (D3D11_TEX3D_SRV, \"Texture3D\"), (D3D11_TEXCUBE_SRV, \"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV,", "= Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER, \"LastDeviceDependentCounter\"), (UINT, \"NumSimultaneousCounters\"), (UINT8, \"NumDetectableParallelUnits\"), ])", "charge, to any person obtaining a copy # of this", "\"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\", \"D3D11_QUERY_PIPELINE_STATISTICS\", \"D3D11_QUERY_OCCLUSION_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS_STREAM0\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0\", \"D3D11_QUERY_SO_STATISTICS_STREAM1\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1\",", "D3D11_DEPTH_STENCIL_VIEW_DESC = Struct(\"D3D11_DEPTH_STENCIL_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_DSV_DIMENSION, \"ViewDimension\"), (D3D11_DSV_FLAG, \"Flags\"),", "= Interface(\"ID3D11Texture2D\", ID3D11Resource) ID3D11Texture3D = Interface(\"ID3D11Texture3D\", ID3D11Resource) ID3D11View = Interface(\"ID3D11View\",", "(SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\")]), StdMethod(HRESULT, \"CreateHullShader\", [(Blob(Const(Void), \"BytecodeLength\"),", "\"ppPixelShader\")]), StdMethod(HRESULT, \"CreateHullShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"),", "]) ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild) ID3D11BlendState = Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState", "\"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"GetPredication\", [Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\"), Out(Pointer(BOOL), \"pPredicateValue\")]), StdMethod(Void, \"GSGetShaderResources\",", "this permission notice shall be included in # all copies", "\"HSGetShader\", [Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"HSGetSamplers\",", "(HMODULE, \"Software\"), 
(D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT,", "\"bottom\"), (UINT, \"back\"), ]) ID3D11DeviceChild.methods += [ StdMethod(Void, \"GetDevice\", [Out(Pointer(ObjPointer(ID3D11Device)),", "D3D11_TEX1D_SRV = Struct(\"D3D11_TEX1D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX1D_ARRAY_SRV", "\"D3D11_SRV_DIMENSION_TEXTURE2D\", \"D3D11_SRV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2DMS\", \"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION", "\"DSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSGetConstantBuffers\",", "Struct(\"D3D11_TEXTURE1D_DESC\", [ (UINT, \"Width\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT, \"Format\"),", "\"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER\", \"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST\", \"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\", ]) D3D11_FORMAT_SUPPORT2 = Enum(\"D3D11_FORMAT_SUPPORT2\", [", "OR OTHER DEALINGS IN # THE SOFTWARE. # ##########################################################################/ from", "(BOOL, \"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods += [ StdMethod(Void, \"GetDesc\",", "(UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_SRV", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSSetShader\", [(ObjPointer(ID3D11GeometryShader), \"pShader\"),", "]) D3D11_INPUT_ELEMENT_DESC = Struct(\"D3D11_INPUT_ELEMENT_DESC\", [ (LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (DXGI_FORMAT,", "[ \"D3D11_DSV_READ_ONLY_DEPTH\", \"D3D11_DSV_READ_ONLY_STENCIL\", ]) D3D11_DEPTH_STENCIL_VIEW_DESC = Struct(\"D3D11_DEPTH_STENCIL_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"),", "\"GSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSSetSamplers\",", "Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader = Interface(\"ID3D11HullShader\", ID3D11DeviceChild) ID3D11DomainShader = Interface(\"ID3D11DomainShader\", ID3D11DeviceChild)", "\"pDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]),", "\"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ] D3D11_BLEND = Enum(\"D3D11_BLEND\", [ \"D3D11_BLEND_ZERO\", \"D3D11_BLEND_ONE\",", "\"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), \"ppUAView\")]), StdMethod(HRESULT, \"CreateRenderTargetView\", [(ObjPointer(ID3D11Resource),", "\"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\", 
\"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH\",", "\"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\", [ (UINT64, \"Frequency\"),", "\"D3D11_COMPARISON_GREATER_EQUAL\", \"D3D11_COMPARISON_ALWAYS\", ]) D3D11_DEPTH_WRITE_MASK = Enum(\"D3D11_DEPTH_WRITE_MASK\", [ \"D3D11_DEPTH_WRITE_MASK_ZERO\", \"D3D11_DEPTH_WRITE_MASK_ALL\", ])", "\"Width\"), (FLOAT, \"Height\"), (FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION =", "\"MipSlice\"), ]) D3D11_TEX1D_ARRAY_DSV = Struct(\"D3D11_TEX1D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"),", "\"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"), (BOOL, \"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "\"Stream\"), (LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (BYTE, \"StartComponent\"), (BYTE, \"ComponentCount\"), (BYTE,", "\"Texture1D\"), (D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"), (D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV,", "\"pDesc\")]), ] D3D11_RASTERIZER_DESC = Struct(\"D3D11_RASTERIZER_DESC\", [ (D3D11_FILL_MODE, \"FillMode\"), (D3D11_CULL_MODE, \"CullMode\"),", "= Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ])", "(UINT, \"ArraySize\"), ]) D3D11_TEX3D_SRV = Struct(\"D3D11_TEX3D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT,", "D3D11_TEX2D_UAV = Struct(\"D3D11_TEX2D_UAV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_UAV = Struct(\"D3D11_TEX2D_ARRAY_UAV\",", "\"D3D11_QUERY_SO_STATISTICS_STREAM2\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2\", \"D3D11_QUERY_SO_STATISTICS_STREAM3\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3\", ]) D3D11_QUERY_MISC_FLAG = Flags(UINT, [ \"D3D11_QUERY_MISC_PREDICATEHINT\",", "\"NumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSSetScissorRects\", [(UINT, \"NumRects\"), (Array(Const(D3D11_RECT), \"NumRects\"), \"pRects\")]), StdMethod(Void,", "[(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(HRESULT, \"CreateSamplerState\", [(Pointer(Const(D3D11_SAMPLER_DESC)), \"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)), \"ppSamplerState\")]),", "(BYTE, \"OutputSlot\"), ]) D3D11_VIEWPORT = Struct(\"D3D11_VIEWPORT\", [ (FLOAT, \"TopLeftX\"), (FLOAT,", "(D3D11_TEX2DMS_ARRAY_SRV, \"Texture2DMSArray\"), (D3D11_TEX3D_SRV, \"Texture3D\"), (D3D11_TEXCUBE_SRV, \"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV, \"TextureCubeArray\"), (D3D11_BUFFEREX_SRV, \"BufferEx\"),", "\"ThreadGroupCountX\"), (UINT, \"ThreadGroupCountY\"), (UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT,", "\"GetPredication\", [Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\"), Out(Pointer(BOOL), \"pPredicateValue\")]), StdMethod(Void, \"GSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"),", "(D3D11_TEX2D_DSV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"), (D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None),", 
"\"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"CSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"),", "D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct(\"D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS\", [ (BOOL, \"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"), ]) D3D11_FEATURE, D3D11_FEATURE_DATA =", "\"GetDesc\", [Out(Pointer(D3D11_TEXTURE3D_DESC), \"pDesc\")]), ] D3D11_TEXTURECUBE_FACE = Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\",", "(UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"), (UINT,", "None), ]) D3D11_TEX1D_RTV = Struct(\"D3D11_TEX1D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_RTV", "]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_SRV_DIMENSION, \"ViewDimension\"), (Union(None,", "\"D3D11_STENCIL_OP_DECR_SAT\", \"D3D11_STENCIL_OP_INVERT\", \"D3D11_STENCIL_OP_INCR\", \"D3D11_STENCIL_OP_DECR\", ]) D3D11_DEPTH_STENCILOP_DESC = Struct(\"D3D11_DEPTH_STENCILOP_DESC\", [ (D3D11_STENCIL_OP,", "D3D11_BUFFER_SRV = Struct(\"D3D11_BUFFER_SRV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None),", "Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC", "(UINT8, \"Stencil\")]), StdMethod(Void, \"GenerateMips\", [(ObjPointer(ID3D11ShaderResourceView), \"pShaderResourceView\")]), StdMethod(Void, \"SetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\"),", "Struct(\"D3D11_DEPTH_STENCILOP_DESC\", [ (D3D11_STENCIL_OP, \"StencilFailOp\"), (D3D11_STENCIL_OP, \"StencilDepthFailOp\"), (D3D11_STENCIL_OP, \"StencilPassOp\"), (D3D11_COMPARISON_FUNC, \"StencilFunc\"),", "(Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]), StdMethod(Void, \"IASetVertexBuffers\", [(UINT,", "Struct(\"D3D11_UNORDERED_ACCESS_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_UAV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_UAV, \"Buffer\"),", "\"D3D11_SRV_DIMENSION_TEXTURE1D\", \"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2D\", \"D3D11_SRV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2DMS\", \"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\",", "Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild) ID3D11Buffer = Interface(\"ID3D11Buffer\", ID3D11Resource)", "StdMethod(Void, \"ClearState\", []), StdMethod(Void, \"Flush\", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE, \"GetType\", []), StdMethod(UINT,", "\"pPixelShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"PSSetSamplers\", [(UINT, \"StartSlot\"),", "\"ppVertexBuffers\"), (Pointer(Const(UINT)), \"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"IASetIndexBuffer\", [(ObjPointer(ID3D11Buffer), \"pIndexBuffer\"), (DXGI_FORMAT,", "\"D3D11_BUFFER_UAV_FLAG_APPEND\", \"D3D11_BUFFER_UAV_FLAG_COUNTER\", ]) D3D11_BUFFER_UAV = Struct(\"D3D11_BUFFER_UAV\", [ 
(UINT, \"FirstElement\"), (UINT,", "\"D3D11CoreGetLayeredDeviceSize\", [LPCVOID, DWORD], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateLayeredDevice\", [LPCVOID, DWORD, LPCVOID, (REFIID,", "\"D3D11_BLEND_SRC_ALPHA\", \"D3D11_BLEND_INV_SRC_ALPHA\", \"D3D11_BLEND_DEST_ALPHA\", \"D3D11_BLEND_INV_DEST_ALPHA\", \"D3D11_BLEND_DEST_COLOR\", \"D3D11_BLEND_INV_DEST_COLOR\", \"D3D11_BLEND_SRC_ALPHA_SAT\", \"D3D11_BLEND_BLEND_FACTOR\", \"D3D11_BLEND_INV_BLEND_FACTOR\", \"D3D11_BLEND_SRC1_COLOR\",", "\"DstSubresource\"), (UINT, \"DstX\"), (UINT, \"DstY\"), (UINT, \"DstZ\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT,", "\"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH\",", "\"NumDetectableParallelUnits\"), ]) ID3D11Counter.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]), ]", "(UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct(\"D3D11_UNORDERED_ACCESS_VIEW_DESC\",", "(ObjPointer(ID3D11UnorderedAccessView), \"pSrcView\")]), StdMethod(Void, \"ClearRenderTargetView\", [(ObjPointer(ID3D11RenderTargetView), \"pRenderTargetView\"), (Array(Const(FLOAT), 4), \"ColorRGBA\")]), StdMethod(Void,", "\"ppSamplers\")]), StdMethod(Void, \"CSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]),", "\"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None), ]) ID3D11DepthStencilView.methods += [ StdMethod(Void,", "(Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"),", "= Interface(\"ID3D11Query\", ID3D11Asynchronous) ID3D11Predicate = Interface(\"ID3D11Predicate\", ID3D11Query) ID3D11Counter = Interface(\"ID3D11Counter\",", "(UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]),", "\"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSGetShader\", [Out(Pointer(ObjPointer(ID3D11HullShader)),", "\"D3D11_QUERY_SO_STATISTICS_STREAM1\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1\", \"D3D11_QUERY_SO_STATISTICS_STREAM2\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2\", \"D3D11_QUERY_SO_STATISTICS_STREAM3\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3\", ]) D3D11_QUERY_MISC_FLAG = Flags(UINT,", "\"pDepthStencilView\"), (D3D11_CLEAR_FLAG, \"ClearFlags\"), (FLOAT, \"Depth\"), (UINT8, \"Stencil\")]), StdMethod(Void, \"GenerateMips\", [(ObjPointer(ID3D11ShaderResourceView),", "\"StartSlot\"), (UINT, \"NumBuffers\"), 
(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSGetShaderResources\", [(UINT, \"StartSlot\"),", "[]), ] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY =", "\"D3D11_QUERY_SO_STATISTICS_STREAM3\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3\", ]) D3D11_QUERY_MISC_FLAG = Flags(UINT, [ \"D3D11_QUERY_MISC_PREDICATEHINT\", ]) D3D11_QUERY_DESC", "\"NumViews\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"),", "4), \"Values\")]), StdMethod(Void, \"ClearUnorderedAccessViewFloat\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(FLOAT), 4), \"Values\")]), StdMethod(Void,", "modify, merge, publish, distribute, sublicense, and/or sell # copies of", "Enum(\"D3D11_COUNTER\", [ \"D3D11_COUNTER_DEVICE_DEPENDENT_0\", ]) D3D11_COUNTER_TYPE = Enum(\"D3D11_COUNTER_TYPE\", [ \"D3D11_COUNTER_TYPE_FLOAT32\", \"D3D11_COUNTER_TYPE_UINT16\",", "\"BackFace\"), ]) ID3D11DepthStencilState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ]", "(UINT, \"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_SRV_DIMENSION,", "\"ppShaderResourceViews\")]), StdMethod(Void, \"HSGetShader\", [Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]),", "(DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_RTV, \"Buffer\"), (D3D11_TEX1D_RTV, \"Texture1D\"),", "StdMethod(HRESULT, \"CreateSamplerState\", [(Pointer(Const(D3D11_SAMPLER_DESC)), \"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)), \"ppSamplerState\")]), StdMethod(HRESULT, \"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"),", "\"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMGetRenderTargets\", [(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumViews\"),", "\"GetDesc\", [Out(Pointer(D3D11_TEXTURE1D_DESC), \"pDesc\")]), ] D3D11_TEXTURE2D_DESC = Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT, \"Width\"),", "\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\")]), StdMethod(HRESULT, \"CreateHullShader\", [(Blob(Const(Void),", "= Struct(\"D3D11_TEX2DMS_DSV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_DSV = Struct(\"D3D11_TEX2DMS_ARRAY_DSV\", [", "\"D3D11_RESOURCE_MISC_RESOURCE_CLAMP\", \"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX\", \"D3D11_RESOURCE_MISC_GDI_COMPATIBLE\", ]) D3D11_MAP = Enum(\"D3D11_MAP\", [ \"D3D11_MAP_READ\", \"D3D11_MAP_WRITE\",", "[(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_TEX1D_RTV = Struct(\"D3D11_TEX1D_RTV\", [", "\"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]), StdMethod(HRESULT, \"CreateDomainShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"),", "\"Counter\"), (UINT, \"MiscFlags\"), ]) D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER, 
\"LastDeviceDependentCounter\"),", "\"pDescriptionLength\")]), StdMethod(HRESULT, \"CheckFeatureSupport\", [(D3D11_FEATURE, \"Feature\"), Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT, \"FeatureSupportDataSize\")]), StdMethod(HRESULT,", "\"ColorRGBA\")]), StdMethod(Void, \"ClearUnorderedAccessViewUint\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(UINT), 4), \"Values\")]), StdMethod(Void, \"ClearUnorderedAccessViewFloat\",", "\"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RASTERIZER_DESC),", "\"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ]) D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\", [ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ])", "\"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DrawIndexed\", [(UINT, \"IndexCount\"), (UINT, \"StartIndexLocation\"), (INT,", "[ \"D3D11_FILL_WIREFRAME\", \"D3D11_FILL_SOLID\", ]) D3D11_PRIMITIVE_TOPOLOGY = Enum(\"D3D11_PRIMITIVE_TOPOLOGY\", [ \"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED\", \"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\",", "Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"),", "(UINT, \"Offset\")]), StdMethod(Void, \"DrawIndexedInstanced\", [(UINT, \"IndexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartIndexLocation\"),", "= Struct(\"D3D11_QUERY_DATA_SO_STATISTICS\", [ (UINT64, \"NumPrimitivesWritten\"), (UINT64, \"PrimitivesStorageNeeded\"), ]) D3D11_COUNTER =", "\"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\", [ (UINT64, \"Frequency\"), (BOOL, \"Disjoint\"),", "[(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [ \"D3D11_CREATE_DEVICE_SINGLETHREADED\",", "Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader = Interface(\"ID3D11HullShader\", ID3D11DeviceChild)", "\"ppRasterizerState\")]), StdMethod(Void, \"RSGetViewports\", [Out(Pointer(UINT), \"pNumViewports\"), Out(Array(D3D11_VIEWPORT, \"*pNumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSGetScissorRects\",", "[ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG = Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ])", "\"pResourceDimension\")]), StdMethod(Void, \"SetEvictionPriority\", [(UINT, \"EvictionPriority\")]), StdMethod(UINT, \"GetEvictionPriority\", []), ] D3D11_BUFFER_DESC", "\"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ]) D3D11_SAMPLER_DESC = Struct(\"D3D11_SAMPLER_DESC\", [ (D3D11_FILTER, \"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"),", "\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\")]), StdMethod(HRESULT, \"CreateGeometryShader\", [(Blob(Const(Void),", "\"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreateGeometryShaderWithStreamOutput\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"),", "\"DepthPitch\"), ]) 
ID3D11Resource.methods += [ StdMethod(Void, \"GetType\", [Out(Pointer(D3D11_RESOURCE_DIMENSION), \"pResourceDimension\")]), StdMethod(Void,", "\"NumRects\"), (Array(Const(D3D11_RECT), \"NumRects\"), \"pRects\")]), StdMethod(Void, \"CopySubresourceRegion\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"),", "\"ppConstantBuffers\")]), StdMethod(Void, \"CSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]),", "# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "(SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\")]), StdMethod(HRESULT, \"CreateComputeShader\", [(Blob(Const(Void), \"BytecodeLength\"),", "StdMethod(HRESULT, \"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"),", "\"D3D11_BIND_INDEX_BUFFER\", \"D3D11_BIND_CONSTANT_BUFFER\", \"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG =", "Struct(\"D3D11_BUFFER_RTV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None), (Union(None, [(UINT,", "\"ppSamplers\")]), StdMethod(Void, \"VSGetShader\", [Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]),", "D3D11_TEXCUBE_SRV = Struct(\"D3D11_TEXCUBE_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_ARRAY_SRV", "= Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ])", "= Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous = Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query = Interface(\"ID3D11Query\",", "]) D3D11_TEX1D_UAV = Struct(\"D3D11_TEX1D_UAV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_UAV =", "ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader = Interface(\"ID3D11HullShader\", ID3D11DeviceChild) ID3D11DomainShader =", "(UINT, \"NumElements\"), (D3D11_BUFFER_UAV_FLAG, \"Flags\"), ]) D3D11_TEX1D_UAV = Struct(\"D3D11_TEX1D_UAV\", [ (UINT,", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout),", "(UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\",", "Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\", \"D3D11_BLEND_OP_MAX\", ]) D3D11_COLOR_WRITE_ENABLE =", "Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX: Undocumented functions,", "from dxgi import * from d3dcommon import * from 
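# The *_VIEW_DESC structures model d3d11.h unions with Union(None, [...]):
# only one member is live at a time, selected by the ViewDimension field that
# precedes the union, and the trailing None leaves the union member itself
# unnamed, exactly like the anonymous union in the C header.  A hypothetical
# two-member sketch of the same shape, reusing the SRV types defined above
# (the struct name is illustrative, not a D3D11 type):
EXAMPLE_VIEW_DESC = Struct("EXAMPLE_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_SRV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_SRV, "Buffer"),
        (D3D11_TEX2D_SRV, "Texture2D"),
    ]), None),
])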
D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])

D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_RTV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_RTV, "Buffer"),
        (D3D11_TEX1D_RTV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
        (D3D11_TEX2D_RTV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
        (D3D11_TEX2DMS_RTV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
        (D3D11_TEX3D_RTV, "Texture3D"),
    ]), None),
])

ID3D11RenderTargetView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]

D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_DSV_FLAG = Flags(UINT, [
    "D3D11_DSV_READ_ONLY_DEPTH",
    "D3D11_DSV_READ_ONLY_STENCIL",
])

D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_DSV_DIMENSION, "ViewDimension"),
    (D3D11_DSV_FLAG, "Flags"),
    (Union(None, [
        (D3D11_TEX1D_DSV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
        (D3D11_TEX2D_DSV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
        (D3D11_TEX2DMS_DSV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
    ]), None),
])

ID3D11DepthStencilView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]),
]

D3D11_BUFFER_UAV_FLAG = Flags(UINT, [
    "D3D11_BUFFER_UAV_FLAG_RAW",
    "D3D11_BUFFER_UAV_FLAG_APPEND",
    "D3D11_BUFFER_UAV_FLAG_COUNTER",
])

D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
    (D3D11_BUFFER_UAV_FLAG, "Flags"),
])

D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_UAV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_UAV, "Buffer"),
        (D3D11_TEX1D_UAV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
        (D3D11_TEX2D_UAV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
        (D3D11_TEX3D_UAV, "Texture3D"),
    ]), None),
])

ID3D11UnorderedAccessView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]),
]

D3D11_FILTER = Enum("D3D11_FILTER", [
    "D3D11_FILTER_MIN_MAG_MIP_POINT",
    "D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
    "D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
    "D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_MIN_MAG_MIP_LINEAR",
    "D3D11_FILTER_ANISOTROPIC",
    "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_ANISOTROPIC",
])

D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [
    "D3D11_FILTER_TYPE_POINT",
    "D3D11_FILTER_TYPE_LINEAR",
])

D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
    "D3D11_TEXTURE_ADDRESS_WRAP",
    "D3D11_TEXTURE_ADDRESS_MIRROR",
    "D3D11_TEXTURE_ADDRESS_CLAMP",
    "D3D11_TEXTURE_ADDRESS_BORDER",
    "D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])

D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
    (D3D11_FILTER, "Filter"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
    (FLOAT, "MipLODBias"),
    (UINT, "MaxAnisotropy"),
    (D3D11_COMPARISON_FUNC, "ComparisonFunc"),
    (Array(FLOAT, 4), "BorderColor"),
    (FLOAT, "MinLOD"),
    (FLOAT, "MaxLOD"),
])

ID3D11SamplerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]),
]

ID3D11Asynchronous.methods += [
    StdMethod(UINT, "GetDataSize", []),
]

D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [
    "D3D11_ASYNC_GETDATA_DONOTFLUSH",
])

D3D11_QUERY = Enum("D3D11_QUERY", [
    "D3D11_QUERY_EVENT",
    "D3D11_QUERY_OCCLUSION",
    "D3D11_QUERY_TIMESTAMP",
    "D3D11_QUERY_TIMESTAMP_DISJOINT",
    "D3D11_QUERY_PIPELINE_STATISTICS",
    "D3D11_QUERY_OCCLUSION_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS_STREAM0",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
    "D3D11_QUERY_SO_STATISTICS_STREAM1",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
    "D3D11_QUERY_SO_STATISTICS_STREAM2",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
    "D3D11_QUERY_SO_STATISTICS_STREAM3",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])

D3D11_QUERY_MISC_FLAG = Flags(UINT, [
    "D3D11_QUERY_MISC_PREDICATEHINT",
])

D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [
    (D3D11_QUERY, "Query"),
    (D3D11_QUERY_MISC_FLAG, "MiscFlags"),
])

ID3D11Query.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]),
]

D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
    (UINT64, "Frequency"),
    (BOOL, "Disjoint"),
])

D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
    (UINT64, "IAVertices"),
    (UINT64, "IAPrimitives"),
    (UINT64, "VSInvocations"),
    (UINT64, "GSInvocations"),
    (UINT64, "GSPrimitives"),
    (UINT64, "CInvocations"),
    (UINT64, "CPrimitives"),
    (UINT64, "PSInvocations"),
    (UINT64, "HSInvocations"),
    (UINT64, "DSInvocations"),
    (UINT64, "CSInvocations"),
])

D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [
    (UINT64, "NumPrimitivesWritten"),
    (UINT64, "PrimitivesStorageNeeded"),
])

D3D11_COUNTER = Enum("D3D11_COUNTER", [
    "D3D11_COUNTER_DEVICE_DEPENDENT_0",
])

D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
    "D3D11_COUNTER_TYPE_FLOAT32",
    "D3D11_COUNTER_TYPE_UINT16",
    "D3D11_COUNTER_TYPE_UINT32",
    "D3D11_COUNTER_TYPE_UINT64",
])

D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [
    (D3D11_COUNTER, "Counter"),
    (UINT, "MiscFlags"),
])

D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
    (D3D11_COUNTER, "LastDeviceDependentCounter"),
    (UINT, "NumSimultaneousCounters"),
    (UINT8, "NumDetectableParallelUnits"),
])

ID3D11Counter.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]),
]
\"MapType\"), (D3D11_MAP_FLAG, \"MapFlags\"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), \"pMappedResource\")]), StdMethod(Void,", "\"D3D11_QUERY_MISC_PREDICATEHINT\", ]) D3D11_QUERY_DESC = Struct(\"D3D11_QUERY_DESC\", [ (D3D11_QUERY, \"Query\"), (D3D11_QUERY_MISC_FLAG, \"MiscFlags\"),", "[ (UINT64, \"IAVertices\"), (UINT64, \"IAPrimitives\"), (UINT64, \"VSInvocations\"), (UINT64, \"GSInvocations\"), (UINT64,", "]) D3D11_RTV_DIMENSION = Enum(\"D3D11_RTV_DIMENSION\", [ \"D3D11_RTV_DIMENSION_UNKNOWN\", \"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\",", "(BYTE, \"StartComponent\"), (BYTE, \"ComponentCount\"), (BYTE, \"OutputSlot\"), ]) D3D11_VIEWPORT = Struct(\"D3D11_VIEWPORT\",", "[(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"),", "(UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_ARRAY_SRV = Struct(\"D3D11_TEXCUBE_ARRAY_SRV\", [ (UINT,", "called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set StdFunction(HRESULT, \"D3D11CoreRegisterLayers\", [LPCVOID,", "(UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_UAV = Struct(\"D3D11_TEX3D_UAV\",", "Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT, \"CheckCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pDesc\"),", "]) D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "\"D3D11_TEXTURE_ADDRESS_MIRROR\", \"D3D11_TEXTURE_ADDRESS_CLAMP\", \"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ]) D3D11_SAMPLER_DESC = Struct(\"D3D11_SAMPLER_DESC\", [ (D3D11_FILTER,", "(UINT, \"DstX\"), (UINT, \"DstY\"), (UINT, \"DstZ\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"),", "Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX:", "ID3D11DeviceChild) ID3D11Asynchronous = Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query = Interface(\"ID3D11Query\", ID3D11Asynchronous) ID3D11Predicate", "= Struct(\"D3D11_TEXTURE3D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"Depth\"), (UINT,", "StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"ClearState\", []),", "(UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSSetShader\", [(ObjPointer(ID3D11PixelShader), \"pPixelShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)),", "[(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\")]), StdMethod(HRESULT, \"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), 
\"ppRasterizerState\")]),", "\"BaseVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT,", "(UINT, \"WSize\"), ]) D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct(\"D3D11_UNORDERED_ACCESS_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_UAV_DIMENSION,", "Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_RTV, \"Buffer\"),", "(FLOAT, \"MipLODBias\"), (UINT, \"MaxAnisotropy\"), (D3D11_COMPARISON_FUNC, \"ComparisonFunc\"), (Array(FLOAT, 4), \"BorderColor\"), (FLOAT,", "\"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), \"ppSRView\")]), StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)),", "\"D3D11_BLEND_SRC1_ALPHA\", \"D3D11_BLEND_INV_SRC1_ALPHA\", ]) D3D11_BLEND_OP = Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\",", "[(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]),", "Interface(\"ID3D11View\", ID3D11DeviceChild) ID3D11ShaderResourceView = Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView = Interface(\"ID3D11RenderTargetView\", ID3D11View)", "(D3D11_TEX3D_UAV, \"Texture3D\"), ]), None), ]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void, \"GetDesc\",", "= Struct(\"D3D11_DEPTH_STENCILOP_DESC\", [ (D3D11_STENCIL_OP, \"StencilFailOp\"), (D3D11_STENCIL_OP, \"StencilDepthFailOp\"), (D3D11_STENCIL_OP, \"StencilPassOp\"), (D3D11_COMPARISON_FUNC,", "(D3D11_BUFFEREX_SRV, \"BufferEx\"), ]), None), ]) ID3D11ShaderResourceView.methods += [ StdMethod(Void, \"GetDesc\",", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), \"pDesc\")]), ] D3D11_TEX1D_DSV = Struct(\"D3D11_TEX1D_DSV\", [", "\"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppVertexBuffers\"), (Pointer(Const(UINT)), \"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"IASetIndexBuffer\",", "= Struct(\"D3D11_SO_DECLARATION_ENTRY\", [ (UINT, \"Stream\"), (LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (BYTE,", "\"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ]) ID3D11View.methods += [ StdMethod(Void,", "[ StdMethod(UINT, \"GetDataSize\", []), ] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\",", "DWORD, LPCVOID, (REFIID, \"riid\"), Out(Pointer(ObjPointer(Void)), \"ppvObj\")], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateDevice\", [DWORD,", "\"StencilEnable\"), (UINT8, \"StencilReadMask\"), (UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"), (D3D11_DEPTH_STENCILOP_DESC, \"BackFace\"), ])", "\"*pNumRects\"), \"pRects\")]), StdMethod(Void, \"HSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), 
StdMethod(Void, \"VSGetSamplers\", [(UINT,", "[(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"), (SIZE_T, \"BytecodeLength\"),", "\"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ]) ID3D11View.methods += [ StdMethod(Void, \"GetResource\", [Out(Pointer(ObjPointer(ID3D11Resource)),", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSGetShader\", [Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT),", "]) D3D11_TEX3D_SRV = Struct(\"D3D11_TEX3D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ])", "(D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"), (FLOAT, \"MipLODBias\"), (UINT, \"MaxAnisotropy\"),", "[Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\"), Out(Array(FLOAT, 4), \"BlendFactor\"), Out(Pointer(UINT), \"pSampleMask\")]), StdMethod(Void, \"OMGetDepthStencilState\", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)),", "\"ElementOffset\")]), None), (Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_BUFFEREX_SRV_FLAG", "(D3D11_TEX2D_ARRAY_RTV, \"Texture2DArray\"), (D3D11_TEX2DMS_RTV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_RTV, \"Texture2DMSArray\"), (D3D11_TEX3D_RTV, \"Texture3D\"), ]), None),", "\"GetType\", []), StdMethod(UINT, \"GetContextFlags\", []), StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)),", "\"RSGetViewports\", [Out(Pointer(UINT), \"pNumViewports\"), Out(Array(D3D11_VIEWPORT, \"*pNumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSGetScissorRects\", [Out(Pointer(UINT), \"pNumRects\"),", "D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"), (UINT64, \"IAPrimitives\"), (UINT64, \"VSInvocations\"),", "\"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"), (UINT,", "\"pResource\"), (FLOAT, \"MinLOD\")]), StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource),", "= Struct(\"D3D11_SAMPLER_DESC\", [ (D3D11_FILTER, \"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE,", "\"StartIndexLocation\"), (INT, \"BaseVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT,", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_RTV = Struct(\"D3D11_BUFFER_RTV\", [ (Union(None,", "\"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"),", "\"DrawIndexedInstanced\", [(UINT, \"IndexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\"), (UINT,", "[ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"First2DArrayFace\"), (UINT, \"NumCubes\"), ])", "]) D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [ 
\"D3D11_BUFFEREX_SRV_FLAG_RAW\", ]) D3D11_BUFFEREX_SRV = Struct(\"D3D11_BUFFEREX_SRV\",", "\"InstanceCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\", [(UINT,", "\"D3D11_RTV_DIMENSION_TEXTURE3D\", ]) D3D11_UAV_DIMENSION = Enum(\"D3D11_UAV_DIMENSION\", [ \"D3D11_UAV_DIMENSION_UNKNOWN\", \"D3D11_UAV_DIMENSION_BUFFER\", \"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\",", "\"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH\",", "[(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"Dispatch\", [(UINT, \"ThreadGroupCountX\"), (UINT, \"ThreadGroupCountY\"),", "[ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [", "(UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void, \"CSSetShader\", [(ObjPointer(ID3D11ComputeShader),", "[ (D3D11_BUFFER_SRV, \"Buffer\"), (D3D11_TEX1D_SRV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_SRV, \"Texture1DArray\"), (D3D11_TEX2D_SRV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_SRV,", "\"pSysMem\"), (UINT, \"SysMemPitch\"), (UINT, \"SysMemSlicePitch\"), ]) D3D11_MAPPED_SUBRESOURCE = Struct(\"D3D11_MAPPED_SUBRESOURCE\", [", "D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_SRV_DIMENSION, \"ViewDimension\"), (Union(None, [", "StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"),", "]) D3D11_PRIMITIVE = Enum(\"D3D11_PRIMITIVE\", [ \"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\",", "\"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ])", "StdMethod(Void, \"GetDevice\", [Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT), \"pDataSize\"),", "]) D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2DMS_RTV =", "\"CreateSamplerState\", [(Pointer(Const(D3D11_SAMPLER_DESC)), \"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)), \"ppSamplerState\")]), StdMethod(HRESULT, \"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)),", "Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreatePixelShader\", [(Blob(Const(Void), 
\"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage),", "\"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "StdMethod(Void, \"Dispatch\", [(UINT, \"ThreadGroupCountX\"), (UINT, \"ThreadGroupCountY\"), (UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\",", "StdMethod(Void, \"SOGetTargets\", [(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppSOTargets\")]), StdMethod(Void, \"RSGetState\", [Out(Pointer(ObjPointer(ID3D11RasterizerState)),", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppVertexBuffers\"), Out(Pointer(UINT), \"pStrides\"), Out(Pointer(UINT), \"pOffsets\")]), StdMethod(Void, \"IAGetIndexBuffer\",", "Enum(\"D3D11_PRIMITIVE\", [ \"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\", \"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\",", "(UINT, \"ArraySize\"), ]) D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\", [ (UINT, \"MipSlice\"), ])", "[ \"D3D11_BIND_VERTEX_BUFFER\", \"D3D11_BIND_INDEX_BUFFER\", \"D3D11_BIND_CONSTANT_BUFFER\", \"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ])", "[ \"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\", \"D3D11_QUERY_PIPELINE_STATISTICS\", \"D3D11_QUERY_OCCLUSION_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS_STREAM0\",", "deal # in the Software without restriction, including without limitation", "\"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void, \"OMSetBlendState\", [(ObjPointer(ID3D11BlendState), \"pBlendState\"),", "\"pNameLength\"), Out(LPSTR, \"szUnits\"), Out(Pointer(UINT), \"pUnitsLength\"), Out(LPSTR, \"szDescription\"), Out(Pointer(UINT), \"pDescriptionLength\")]), StdMethod(HRESULT,", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell #", "notice and this permission notice shall be included in #", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"VSGetConstantBuffers\", [(UINT, \"StartSlot\"),", "\"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX: Undocumented functions, called by d3d11sdklayers.dll", "D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\", [ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ]) D3D11_TEXTURE_ADDRESS_MODE = Enum(\"D3D11_TEXTURE_ADDRESS_MODE\",", "[ (D3D11_STENCIL_OP, \"StencilFailOp\"), (D3D11_STENCIL_OP, \"StencilDepthFailOp\"), (D3D11_STENCIL_OP, \"StencilPassOp\"), (D3D11_COMPARISON_FUNC, \"StencilFunc\"), ])", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), \"pDesc\")]), ] D3D11_TEX1D_DSV = Struct(\"D3D11_TEX1D_DSV\", [ (UINT,", "= Struct(\"D3D11_BUFFEREX_SRV\", [ (UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFEREX_SRV_FLAG, \"Flags\"), ])", "\"InstanceId\"), (UINT, \"InstanceIndex\"), (UINT, \"TypeId\"), (UINT, \"ConstantBuffer\"), (UINT, 
\"BaseConstantBufferOffset\"), (UINT,", "Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild)", "\"pDesc\")]), ] D3D11_FORMAT_SUPPORT = Flags(UINT, [ \"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_SO_BUFFER\",", "\"ppInstance\")]), ] ID3D11CommandList.methods += [ StdMethod(UINT, \"GetContextFlags\", []), ] D3D11_FEATURE_DATA_THREADING", "[(UINT, \"VertexCount\"), (UINT, \"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\"),", "(D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture2D.methods += [ StdMethod(Void, \"GetDesc\",", "(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void,", "(UINT, \"SrcSubresource\"), (Pointer(Const(D3D11_BOX)), \"pSrcBox\")]), StdMethod(Void, \"CopyResource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\")]),", "StdMethod(UINT, \"GetExceptionMode\", []), ] d3d11 = API(\"d3d11\") d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\",", "\"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"),", "\"D3D11_COLOR_WRITE_ENABLE_ALPHA\", ]) D3D11_RENDER_TARGET_BLEND_DESC = Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\", [ (BOOL, \"BlendEnable\"), (D3D11_BLEND, \"SrcBlend\"),", "(UINT, \"MipLevels\"), ]) D3D11_TEX1D_ARRAY_SRV = Struct(\"D3D11_TEX1D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT,", "(Array(Const(UINT), \"NumStrides\"), \"pBufferStrides\"), (UINT, \"NumStrides\"), (UINT, \"RasterizedStream\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)),", "D3D11_TEX1D_ARRAY_UAV = Struct(\"D3D11_TEX1D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "[(ObjPointer(ID3D11Buffer), \"pIndexBuffer\"), (DXGI_FORMAT, \"Format\"), (UINT, \"Offset\")]), StdMethod(Void, \"DrawIndexedInstanced\", [(UINT, \"IndexCountPerInstance\"),", "]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [ \"D3D11_APPEND_ALIGNED_ELEMENT\", ]) D3D11_INPUT_ELEMENT_DESC = Struct(\"D3D11_INPUT_ELEMENT_DESC\",", "DEALINGS IN # THE SOFTWARE. 
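# Resource usage, binding and CPU-access flags, plus the structures used to
# initialize, map and copy resource memory.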
D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
    "D3D11_RESOURCE_DIMENSION_UNKNOWN",
    "D3D11_RESOURCE_DIMENSION_BUFFER",
    "D3D11_RESOURCE_DIMENSION_TEXTURE1D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE2D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])

D3D11_USAGE = Enum("D3D11_USAGE", [
    "D3D11_USAGE_DEFAULT",
    "D3D11_USAGE_IMMUTABLE",
    "D3D11_USAGE_DYNAMIC",
    "D3D11_USAGE_STAGING",
])

D3D11_BIND_FLAG = Flags(UINT, [
    "D3D11_BIND_VERTEX_BUFFER",
    "D3D11_BIND_INDEX_BUFFER",
    "D3D11_BIND_CONSTANT_BUFFER",
    "D3D11_BIND_SHADER_RESOURCE",
    "D3D11_BIND_STREAM_OUTPUT",
    "D3D11_BIND_RENDER_TARGET",
    "D3D11_BIND_DEPTH_STENCIL",
    "D3D11_BIND_UNORDERED_ACCESS",
])

D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
    "D3D11_CPU_ACCESS_WRITE",
    "D3D11_CPU_ACCESS_READ",
])

D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
    "D3D11_RESOURCE_MISC_GENERATE_MIPS",
    "D3D11_RESOURCE_MISC_SHARED",
    "D3D11_RESOURCE_MISC_TEXTURECUBE",
    "D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
    "D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
    "D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
    "D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
    "D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
    "D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])

D3D11_MAP = Enum("D3D11_MAP", [
    "D3D11_MAP_READ",
    "D3D11_MAP_WRITE",
    "D3D11_MAP_READ_WRITE",
    "D3D11_MAP_WRITE_DISCARD",
    "D3D11_MAP_WRITE_NO_OVERWRITE",
])

D3D11_MAP_FLAG = Flags(UINT, [
    "D3D11_MAP_FLAG_DO_NOT_WAIT",
])

D3D11_RAISE_FLAG = Flags(UINT, [
    "D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR",
])

D3D11_CLEAR_FLAG = Flags(UINT, [
    "D3D11_CLEAR_DEPTH",
    "D3D11_CLEAR_STENCIL",
])

D3D11_BOX = Struct("D3D11_BOX", [
    (UINT, "left"),
    (UINT, "top"),
    (UINT, "front"),
    (UINT, "right"),
    (UINT, "bottom"),
    (UINT, "back"),
])

D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
    (OpaquePointer(Const(Void)), "pSysMem"),
    (UINT, "SysMemPitch"),
    (UINT, "SysMemSlicePitch"),
])

D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
    (OpaquePointer(Void), "pData"),
    (UINT, "RowPitch"),
    (UINT, "DepthPitch"),
])

ID3D11Resource.methods += [
    StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]),
    StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
    StdMethod(UINT, "GetEvictionPriority", []),
]

D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
    (UINT, "ByteWidth"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
    (UINT, "StructureByteStride"),
])

ID3D11Buffer.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]),
]

D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
    (UINT, "Width"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture1D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]),
]

D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (DXGI_SAMPLE_DESC, "SampleDesc"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture2D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]

D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "Depth"),
    (UINT, "MipLevels"),
    (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture3D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]),
]

D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
    "D3D11_TEXTURECUBE_FACE_POSITIVE_X",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])

D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
    "D3D11_SRV_DIMENSION_UNKNOWN",
    "D3D11_SRV_DIMENSION_BUFFER",
    "D3D11_SRV_DIMENSION_TEXTURE1D",
    "D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2D",
    "D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2DMS",
    "D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE3D",
    "D3D11_SRV_DIMENSION_TEXTURECUBE",
    "D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
    "D3D11_SRV_DIMENSION_BUFFEREX",
])

D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [
    "D3D11_DSV_DIMENSION_UNKNOWN",
    "D3D11_DSV_DIMENSION_TEXTURE1D",
    "D3D11_DSV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_DSV_DIMENSION_TEXTURE2D",
    "D3D11_DSV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_DSV_DIMENSION_TEXTURE2DMS",
    "D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY",
])

D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
    "D3D11_RTV_DIMENSION_UNKNOWN",
    "D3D11_RTV_DIMENSION_BUFFER",
    "D3D11_RTV_DIMENSION_TEXTURE1D",
    "D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2D",
    "D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2DMS",
    "D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE3D",
])

D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [
    "D3D11_UAV_DIMENSION_UNKNOWN",
    "D3D11_UAV_DIMENSION_BUFFER",
    "D3D11_UAV_DIMENSION_TEXTURE1D",
    "D3D11_UAV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_UAV_DIMENSION_TEXTURE2D",
    "D3D11_UAV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_UAV_DIMENSION_TEXTURE3D",
])

ID3D11View.methods += [
    StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]

D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])

D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [
    "D3D11_BUFFEREX_SRV_FLAG_RAW",
])

D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
    (D3D11_BUFFEREX_SRV_FLAG, "Flags"),
])

D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "First2DArrayFace"),
    (UINT, "NumCubes"),
])

D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_SRV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_SRV, "Buffer"),
        (D3D11_TEX1D_SRV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
        (D3D11_TEX2D_SRV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
        (D3D11_TEX2DMS_SRV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
        (D3D11_TEX3D_SRV, "Texture3D"),
        (D3D11_TEXCUBE_SRV, "TextureCube"),
        (D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
        (D3D11_BUFFEREX_SRV, "BufferEx"),
    ]), None),
])

ID3D11ShaderResourceView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]),
]
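# Render-target view descriptors follow the same per-dimension pattern as the
# shader-resource views above.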
D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])

D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_RTV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_RTV, "Buffer"),
        (D3D11_TEX1D_RTV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
        (D3D11_TEX2D_RTV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
        (D3D11_TEX2DMS_RTV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
        (D3D11_TEX3D_RTV, "Texture3D"),
    ]), None),
])

ID3D11RenderTargetView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]
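# Depth-stencil and unordered-access views, samplers, and the query/counter
# machinery used through ID3D11Asynchronous.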
D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_DSV_FLAG = Flags(UINT, [
    "D3D11_DSV_READ_ONLY_DEPTH",
    "D3D11_DSV_READ_ONLY_STENCIL",
])

D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_DSV_DIMENSION, "ViewDimension"),
    (D3D11_DSV_FLAG, "Flags"),
    (Union(None, [
        (D3D11_TEX1D_DSV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
        (D3D11_TEX2D_DSV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
        (D3D11_TEX2DMS_DSV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
    ]), None),
])

ID3D11DepthStencilView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]),
]

D3D11_BUFFER_UAV_FLAG = Flags(UINT, [
    "D3D11_BUFFER_UAV_FLAG_RAW",
    "D3D11_BUFFER_UAV_FLAG_APPEND",
    "D3D11_BUFFER_UAV_FLAG_COUNTER",
])

D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
    (D3D11_BUFFER_UAV_FLAG, "Flags"),
])

D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_UAV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_UAV, "Buffer"),
        (D3D11_TEX1D_UAV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
        (D3D11_TEX2D_UAV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
        (D3D11_TEX3D_UAV, "Texture3D"),
    ]), None),
])

ID3D11UnorderedAccessView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]),
]

D3D11_FILTER = Enum("D3D11_FILTER", [
    "D3D11_FILTER_MIN_MAG_MIP_POINT",
    "D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
    "D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
    "D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_MIN_MAG_MIP_LINEAR",
    "D3D11_FILTER_ANISOTROPIC",
    "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_ANISOTROPIC",
])

D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [
    "D3D11_FILTER_TYPE_POINT",
    "D3D11_FILTER_TYPE_LINEAR",
])

D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
    "D3D11_TEXTURE_ADDRESS_WRAP",
    "D3D11_TEXTURE_ADDRESS_MIRROR",
    "D3D11_TEXTURE_ADDRESS_CLAMP",
    "D3D11_TEXTURE_ADDRESS_BORDER",
    "D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])

D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
    (D3D11_FILTER, "Filter"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
    (FLOAT, "MipLODBias"),
    (UINT, "MaxAnisotropy"),
    (D3D11_COMPARISON_FUNC, "ComparisonFunc"),
    (Array(FLOAT, 4), "BorderColor"),
    (FLOAT, "MinLOD"),
    (FLOAT, "MaxLOD"),
])

ID3D11SamplerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]),
]

D3D11_FORMAT_SUPPORT = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_SO_BUFFER",
    "D3D11_FORMAT_SUPPORT_TEXTURE1D",
    "D3D11_FORMAT_SUPPORT_TEXTURE2D",
    "D3D11_FORMAT_SUPPORT_TEXTURE3D",
    "D3D11_FORMAT_SUPPORT_TEXTURECUBE",
    "D3D11_FORMAT_SUPPORT_SHADER_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
    "D3D11_FORMAT_SUPPORT_MIP",
    "D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
    "D3D11_FORMAT_SUPPORT_RENDER_TARGET",
    "D3D11_FORMAT_SUPPORT_BLENDABLE",
    "D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
    "D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
    "D3D11_FORMAT_SUPPORT_DISPLAY",
    "D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER",
    "D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
    "D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])

D3D11_FORMAT_SUPPORT2 = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])

ID3D11Asynchronous.methods += [
    StdMethod(UINT, "GetDataSize", []),
]

D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [
    "D3D11_ASYNC_GETDATA_DONOTFLUSH",
])

D3D11_QUERY = Enum("D3D11_QUERY", [
    "D3D11_QUERY_EVENT",
    "D3D11_QUERY_OCCLUSION",
    "D3D11_QUERY_TIMESTAMP",
    "D3D11_QUERY_TIMESTAMP_DISJOINT",
    "D3D11_QUERY_PIPELINE_STATISTICS",
    "D3D11_QUERY_OCCLUSION_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS_STREAM0",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
    "D3D11_QUERY_SO_STATISTICS_STREAM1",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
    "D3D11_QUERY_SO_STATISTICS_STREAM2",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
    "D3D11_QUERY_SO_STATISTICS_STREAM3",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])

D3D11_QUERY_MISC_FLAG = Flags(UINT, [
    "D3D11_QUERY_MISC_PREDICATEHINT",
])

D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [
    (D3D11_QUERY, "Query"),
    (D3D11_QUERY_MISC_FLAG, "MiscFlags"),
])

ID3D11Query.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]),
]

D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
    (UINT64, "Frequency"),
    (BOOL, "Disjoint"),
])

D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
    (UINT64, "IAVertices"),
    (UINT64, "IAPrimitives"),
    (UINT64, "VSInvocations"),
    (UINT64, "GSInvocations"),
    (UINT64, "GSPrimitives"),
    (UINT64, "CInvocations"),
    (UINT64, "CPrimitives"),
    (UINT64, "PSInvocations"),
    (UINT64, "HSInvocations"),
    (UINT64, "DSInvocations"),
    (UINT64, "CSInvocations"),
])

D3D11_COUNTER = Enum("D3D11_COUNTER", [
    "D3D11_COUNTER_DEVICE_DEPENDENT_0",
])

D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
    "D3D11_COUNTER_TYPE_FLOAT32",
    "D3D11_COUNTER_TYPE_UINT16",
    "D3D11_COUNTER_TYPE_UINT32",
    "D3D11_COUNTER_TYPE_UINT64",
])

D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [
    (D3D11_COUNTER, "Counter"),
    (UINT, "MiscFlags"),
])

D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
    (D3D11_COUNTER, "LastDeviceDependentCounter"),
    (UINT, "NumSimultaneousCounters"),
    (UINT8, "NumDetectableParallelUnits"),
])

ID3D11Counter.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]),
]

D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [
    "D3D11_STANDARD_MULTISAMPLE_PATTERN",
    "D3D11_CENTER_MULTISAMPLE_PATTERN",
])
D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
    (UINT, "InstanceId"),
    (UINT, "InstanceIndex"),
    (UINT, "TypeId"),
    (UINT, "ConstantBuffer"),
    (UINT, "BaseConstantBufferOffset"),
    (UINT, "BaseTexture"),
    (UINT, "BaseSampler"),
    (BOOL, "Created"),
])

ID3D11ClassInstance.methods += [
    StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]),
    StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]),
    StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]),
]

ID3D11ClassLinkage.methods += [
    StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
    StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]

ID3D11CommandList.methods += [
    StdMethod(UINT, "GetContextFlags", []),
]

D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
    (BOOL, "DriverConcurrentCreates"),
    (BOOL, "DriverCommandLists"),
])

D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
    (BOOL, "DoublePrecisionFloatShaderOps"),
])

D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])

D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])

D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
    (BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])

D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
    ("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
    ("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
    ("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
    ("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
    ("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)
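# ID3D11DeviceContext: the pipeline-manipulation methods, declared in vtable
# order. Array(Type, "Count") binds a pointer argument to the count argument
# that sizes it, so array contents can be serialized correctly.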
D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
    "D3D11_DEVICE_CONTEXT_IMMEDIATE",
    "D3D11_DEVICE_CONTEXT_DEFERRED",
])

ID3D11DeviceContext.methods += [
    StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
    StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
    StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
    StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
    StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
    StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]),
    StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
    StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
    StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
    StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
    StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
    StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
    StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
    StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
    StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Array(Const(UINT), "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "DrawAuto", []),
    StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
    StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
    StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
    StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
    StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
    StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
    StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
    StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
    StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
    StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
    StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
    StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
    StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
    StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
    StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]),
    StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
    StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
    StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
    StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
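    # The state-query half of the context API follows: each XXSet* above has a
    # matching XXGet* that returns the currently bound objects.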
    StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
    StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]),
    StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
    StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]),
    StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
    StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
    StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
    StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
    StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
    StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
    StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
    StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "ClearState", []),
    StdMethod(Void, "Flush", []),
    StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []),
    StdMethod(UINT, "GetContextFlags", []),
    StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]
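# ID3D11Device: object-creation and capability-query methods.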
D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
    "D3D11_RESOURCE_DIMENSION_UNKNOWN",
    "D3D11_RESOURCE_DIMENSION_BUFFER",
    "D3D11_RESOURCE_DIMENSION_TEXTURE1D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE2D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])

D3D11_USAGE = Enum("D3D11_USAGE", [
    "D3D11_USAGE_DEFAULT",
    "D3D11_USAGE_IMMUTABLE",
    "D3D11_USAGE_DYNAMIC",
    "D3D11_USAGE_STAGING",
])

D3D11_BIND_FLAG = Flags(UINT, [
    "D3D11_BIND_VERTEX_BUFFER",
    "D3D11_BIND_INDEX_BUFFER",
    "D3D11_BIND_CONSTANT_BUFFER",
    "D3D11_BIND_SHADER_RESOURCE",
    "D3D11_BIND_STREAM_OUTPUT",
    "D3D11_BIND_RENDER_TARGET",
    "D3D11_BIND_DEPTH_STENCIL",
    "D3D11_BIND_UNORDERED_ACCESS",
])

D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
    "D3D11_CPU_ACCESS_WRITE",
    "D3D11_CPU_ACCESS_READ",
])

D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
    "D3D11_RESOURCE_MISC_GENERATE_MIPS",
    "D3D11_RESOURCE_MISC_SHARED",
    "D3D11_RESOURCE_MISC_TEXTURECUBE",
    "D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
    "D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
    "D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
    "D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
    "D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
    "D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])

D3D11_MAP = Enum("D3D11_MAP", [
    "D3D11_MAP_READ",
    "D3D11_MAP_WRITE",
    "D3D11_MAP_READ_WRITE",
    "D3D11_MAP_WRITE_DISCARD",
    "D3D11_MAP_WRITE_NO_OVERWRITE",
])

D3D11_MAP_FLAG = Flags(UINT, ["D3D11_MAP_FLAG_DO_NOT_WAIT"])
D3D11_RAISE_FLAG = Flags(UINT, ["D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR"])
D3D11_CLEAR_FLAG = Flags(UINT, ["D3D11_CLEAR_DEPTH", "D3D11_CLEAR_STENCIL"])

D3D11_RECT = Alias("D3D11_RECT", RECT)
D3D11_BOX = Struct("D3D11_BOX", [
    (UINT, "left"),
    (UINT, "top"),
    (UINT, "front"),
    (UINT, "right"),
    (UINT, "bottom"),
    (UINT, "back"),
])

ID3D11Resource.methods += [
    StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]),
    StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
    StdMethod(UINT, "GetEvictionPriority", []),
]

D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
    (UINT, "ByteWidth"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
    (UINT, "StructureByteStride"),
])

ID3D11Buffer.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]),
]

D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
    (UINT, "Width"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture1D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]),
]

D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (DXGI_SAMPLE_DESC, "SampleDesc"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture2D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]

D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "Depth"),
    (UINT, "MipLevels"),
    (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture3D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]),
]

D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
    (OpaquePointer(Const(Void)), "pSysMem"),
    (UINT, "SysMemPitch"),
    (UINT, "SysMemSlicePitch"),
])

D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
    (OpaquePointer(Void), "pData"),
    (UINT, "RowPitch"),
    (UINT, "DepthPitch"),
])
ID3D11View.methods += [
    StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]

D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
    "D3D11_SRV_DIMENSION_UNKNOWN",
    "D3D11_SRV_DIMENSION_BUFFER",
    "D3D11_SRV_DIMENSION_TEXTURE1D",
    "D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2D",
    "D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2DMS",
    "D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE3D",
    "D3D11_SRV_DIMENSION_TEXTURECUBE",
    "D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
    "D3D11_SRV_DIMENSION_BUFFEREX",
])

D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels")])
D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels")])
D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [(UINT, "UnusedField_NothingToDefine")])
D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [(UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels")])
D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels")])
D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [(UINT, "MostDetailedMip"), (UINT, "MipLevels"), (UINT, "First2DArrayFace"), (UINT, "NumCubes")])
D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, ["D3D11_BUFFEREX_SRV_FLAG_RAW"])
D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [(UINT, "FirstElement"), (UINT, "NumElements"), (D3D11_BUFFEREX_SRV_FLAG, "Flags")])

D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_SRV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_SRV, "Buffer"),
        (D3D11_TEX1D_SRV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
        (D3D11_TEX2D_SRV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
        (D3D11_TEX2DMS_SRV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
        (D3D11_TEX3D_SRV, "Texture3D"),
        (D3D11_TEXCUBE_SRV, "TextureCube"),
        (D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
        (D3D11_BUFFEREX_SRV, "BufferEx"),
    ]), None),
])

ID3D11ShaderResourceView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]),
]

D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
    "D3D11_RTV_DIMENSION_UNKNOWN",
    "D3D11_RTV_DIMENSION_BUFFER",
    "D3D11_RTV_DIMENSION_TEXTURE1D",
    "D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2D",
    "D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2DMS",
    "D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE3D",
])

D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [(UINT, "MipSlice")])
D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [(UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [(UINT, "MipSlice")])
D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [(UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [(UINT, "UnusedField_NothingToDefine")])
D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [(UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [(UINT, "MipSlice"), (UINT, "FirstWSlice"), (UINT, "WSize")])

D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_RTV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_RTV, "Buffer"),
        (D3D11_TEX1D_RTV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
        (D3D11_TEX2D_RTV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
        (D3D11_TEX2DMS_RTV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
        (D3D11_TEX3D_RTV, "Texture3D"),
    ]), None),
])

ID3D11RenderTargetView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]

D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [
    "D3D11_DSV_DIMENSION_UNKNOWN",
    "D3D11_DSV_DIMENSION_TEXTURE1D",
    "D3D11_DSV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_DSV_DIMENSION_TEXTURE2D",
    "D3D11_DSV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_DSV_DIMENSION_TEXTURE2DMS",
    "D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY",
])
D3D11_DSV_FLAG = Flags(UINT, ["D3D11_DSV_READ_ONLY_DEPTH", "D3D11_DSV_READ_ONLY_STENCIL"])
D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [(UINT, "MipSlice")])
D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [(UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [(UINT, "MipSlice")])
D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [(UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [(UINT, "UnusedField_NothingToDefine")])
D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [(UINT, "FirstArraySlice"), (UINT, "ArraySize")])

D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_DSV_DIMENSION, "ViewDimension"),
    (D3D11_DSV_FLAG, "Flags"),
    (Union(None, [
        (D3D11_TEX1D_DSV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
        (D3D11_TEX2D_DSV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
        (D3D11_TEX2DMS_DSV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
    ]), None),
])

ID3D11DepthStencilView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]),
]

D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [
    "D3D11_UAV_DIMENSION_UNKNOWN",
    "D3D11_UAV_DIMENSION_BUFFER",
    "D3D11_UAV_DIMENSION_TEXTURE1D",
    "D3D11_UAV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_UAV_DIMENSION_TEXTURE2D",
    "D3D11_UAV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_UAV_DIMENSION_TEXTURE3D",
])
D3D11_BUFFER_UAV_FLAG = Flags(UINT, ["D3D11_BUFFER_UAV_FLAG_RAW", "D3D11_BUFFER_UAV_FLAG_APPEND", "D3D11_BUFFER_UAV_FLAG_COUNTER"])
D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [(UINT, "FirstElement"), (UINT, "NumElements"), (D3D11_BUFFER_UAV_FLAG, "Flags")])
D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [(UINT, "MipSlice")])
D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [(UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [(UINT, "MipSlice")])
D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [(UINT, "MipSlice"), (UINT, "FirstArraySlice"), (UINT, "ArraySize")])
D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [(UINT, "MipSlice"), (UINT, "FirstWSlice"), (UINT, "WSize")])

D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_UAV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_UAV, "Buffer"),
        (D3D11_TEX1D_UAV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
        (D3D11_TEX2D_UAV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
        (D3D11_TEX3D_UAV, "Texture3D"),
    ]), None),
])

ID3D11UnorderedAccessView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]),
]
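# All of the *_VIEW_DESC structs above share one shape: a ViewDimension tag
# followed by a C union of per-dimension structs.  Union(None, [...]) with a
# None discriminator mirrors that: the DSL does not statically know which arm
# is active.  A hypothetical miniature of the same shape (unregistered,
# illustrative only):
D3D11_ILLUSTRATION_VIEW_DESC = Struct("D3D11_ILLUSTRATION_VIEW_DESC", [
    (UINT, "ViewDimension"),  # tag field, as in the real view descriptors
    (Union(None, [
        (D3D11_TEX2D_RTV, "Texture2D"),  # arm used for 2D texture views
        (D3D11_BUFFER_RTV, "Buffer"),    # arm used for buffer views
    ]), None),
])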
D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [
    "D3D11_COMPARISON_NEVER",
    "D3D11_COMPARISON_LESS",
    "D3D11_COMPARISON_EQUAL",
    "D3D11_COMPARISON_LESS_EQUAL",
    "D3D11_COMPARISON_GREATER",
    "D3D11_COMPARISON_NOT_EQUAL",
    "D3D11_COMPARISON_GREATER_EQUAL",
    "D3D11_COMPARISON_ALWAYS",
])

D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", ["D3D11_DEPTH_WRITE_MASK_ZERO", "D3D11_DEPTH_WRITE_MASK_ALL"])

D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [
    "D3D11_STENCIL_OP_KEEP",
    "D3D11_STENCIL_OP_ZERO",
    "D3D11_STENCIL_OP_REPLACE",
    "D3D11_STENCIL_OP_INCR_SAT",
    "D3D11_STENCIL_OP_DECR_SAT",
    "D3D11_STENCIL_OP_INVERT",
    "D3D11_STENCIL_OP_INCR",
    "D3D11_STENCIL_OP_DECR",
])

D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [
    (D3D11_STENCIL_OP, "StencilFailOp"),
    (D3D11_STENCIL_OP, "StencilDepthFailOp"),
    (D3D11_STENCIL_OP, "StencilPassOp"),
    (D3D11_COMPARISON_FUNC, "StencilFunc"),
])

D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [
    (BOOL, "DepthEnable"),
    (D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"),
    (D3D11_COMPARISON_FUNC, "DepthFunc"),
    (BOOL, "StencilEnable"),
    (UINT8, "StencilReadMask"),
    (UINT8, "StencilWriteMask"),
    (D3D11_DEPTH_STENCILOP_DESC, "FrontFace"),
    (D3D11_DEPTH_STENCILOP_DESC, "BackFace"),
])

ID3D11DepthStencilState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")]),
]

D3D11_BLEND = Enum("D3D11_BLEND", [
    "D3D11_BLEND_ZERO",
    "D3D11_BLEND_ONE",
    "D3D11_BLEND_SRC_COLOR",
    "D3D11_BLEND_INV_SRC_COLOR",
    "D3D11_BLEND_SRC_ALPHA",
    "D3D11_BLEND_INV_SRC_ALPHA",
    "D3D11_BLEND_DEST_ALPHA",
    "D3D11_BLEND_INV_DEST_ALPHA",
    "D3D11_BLEND_DEST_COLOR",
    "D3D11_BLEND_INV_DEST_COLOR",
    "D3D11_BLEND_SRC_ALPHA_SAT",
    "D3D11_BLEND_BLEND_FACTOR",
    "D3D11_BLEND_INV_BLEND_FACTOR",
    "D3D11_BLEND_SRC1_COLOR",
    "D3D11_BLEND_INV_SRC1_COLOR",
    "D3D11_BLEND_SRC1_ALPHA",
    "D3D11_BLEND_INV_SRC1_ALPHA",
])

D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [
    "D3D11_BLEND_OP_ADD",
    "D3D11_BLEND_OP_SUBTRACT",
    "D3D11_BLEND_OP_REV_SUBTRACT",
    "D3D11_BLEND_OP_MIN",
    "D3D11_BLEND_OP_MAX",
])

D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [
    (BOOL, "BlendEnable"),
    (D3D11_BLEND, "SrcBlend"),
    (D3D11_BLEND, "DestBlend"),
    (D3D11_BLEND_OP, "BlendOp"),
    (D3D11_BLEND, "SrcBlendAlpha"),
    (D3D11_BLEND, "DestBlendAlpha"),
    (D3D11_BLEND_OP, "BlendOpAlpha"),
    (UINT8, "RenderTargetWriteMask"),
])

D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [
    (BOOL, "AlphaToCoverageEnable"),
    (BOOL, "IndependentBlendEnable"),
    (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"),
])

ID3D11BlendState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")]),
]

D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", ["D3D11_FILL_WIREFRAME", "D3D11_FILL_SOLID"])
D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", ["D3D11_CULL_NONE", "D3D11_CULL_FRONT", "D3D11_CULL_BACK"])

D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [
    (D3D11_FILL_MODE, "FillMode"),
    (D3D11_CULL_MODE, "CullMode"),
    (BOOL, "FrontCounterClockwise"),
    (INT, "DepthBias"),
    (FLOAT, "DepthBiasClamp"),
    (FLOAT, "SlopeScaledDepthBias"),
    (BOOL, "DepthClipEnable"),
    (BOOL, "ScissorEnable"),
    (BOOL, "MultisampleEnable"),
    (BOOL, "AntialiasedLineEnable"),
])

ID3D11RasterizerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")]),
]

D3D11_FILTER = Enum("D3D11_FILTER", [
    "D3D11_FILTER_MIN_MAG_MIP_POINT",
    "D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
    "D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
    "D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_MIN_MAG_MIP_LINEAR",
    "D3D11_FILTER_ANISOTROPIC",
    "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
    "D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
    "D3D11_FILTER_COMPARISON_ANISOTROPIC",
])

D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", ["D3D11_FILTER_TYPE_POINT", "D3D11_FILTER_TYPE_LINEAR"])

D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
    "D3D11_TEXTURE_ADDRESS_WRAP",
    "D3D11_TEXTURE_ADDRESS_MIRROR",
    "D3D11_TEXTURE_ADDRESS_CLAMP",
    "D3D11_TEXTURE_ADDRESS_BORDER",
    "D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])

D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
    (D3D11_FILTER, "Filter"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
    (D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
    (FLOAT, "MipLODBias"),
    (UINT, "MaxAnisotropy"),
    (D3D11_COMPARISON_FUNC, "ComparisonFunc"),
    (Array(FLOAT, 4), "BorderColor"),
    (FLOAT, "MinLOD"),
    (FLOAT, "MaxLOD"),
])

ID3D11SamplerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]),
]

D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, ["D3D11_ASYNC_GETDATA_DONOTFLUSH"])

ID3D11Asynchronous.methods += [
    StdMethod(UINT, "GetDataSize", []),
]

D3D11_QUERY = Enum("D3D11_QUERY", [
    "D3D11_QUERY_EVENT",
    "D3D11_QUERY_OCCLUSION",
    "D3D11_QUERY_TIMESTAMP",
    "D3D11_QUERY_TIMESTAMP_DISJOINT",
    "D3D11_QUERY_PIPELINE_STATISTICS",
    "D3D11_QUERY_OCCLUSION_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS_STREAM0",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
    "D3D11_QUERY_SO_STATISTICS_STREAM1",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
    "D3D11_QUERY_SO_STATISTICS_STREAM2",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
    "D3D11_QUERY_SO_STATISTICS_STREAM3",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])

D3D11_QUERY_MISC_FLAG = Flags(UINT, ["D3D11_QUERY_MISC_PREDICATEHINT"])
D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [(D3D11_QUERY, "Query"), (D3D11_QUERY_MISC_FLAG, "MiscFlags")])

ID3D11Query.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]),
]

D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
    (UINT64, "Frequency"),
    (BOOL, "Disjoint"),
])

D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
    (UINT64, "IAVertices"),
    (UINT64, "IAPrimitives"),
    (UINT64, "VSInvocations"),
    (UINT64, "GSInvocations"),
    (UINT64, "GSPrimitives"),
    (UINT64, "CInvocations"),
    (UINT64, "CPrimitives"),
    (UINT64, "PSInvocations"),
    (UINT64, "HSInvocations"),
    (UINT64, "DSInvocations"),
    (UINT64, "CSInvocations"),
])

D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [
    (UINT64, "NumPrimitivesWritten"),
    (UINT64, "PrimitivesStorageNeeded"),
])

D3D11_COUNTER = Enum("D3D11_COUNTER", ["D3D11_COUNTER_DEVICE_DEPENDENT_0"])
D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
    "D3D11_COUNTER_TYPE_FLOAT32",
    "D3D11_COUNTER_TYPE_UINT16",
    "D3D11_COUNTER_TYPE_UINT32",
    "D3D11_COUNTER_TYPE_UINT64",
])
D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [(D3D11_COUNTER, "Counter"), (UINT, "MiscFlags")])
D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
    (D3D11_COUNTER, "LastDeviceDependentCounter"),
    (UINT, "NumSimultaneousCounters"),
    (UINT8, "NumDetectableParallelUnits"),
])

ID3D11Counter.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]),
]

D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
    (UINT, "InstanceId"),
    (UINT, "InstanceIndex"),
    (UINT, "TypeId"),
    (UINT, "ConstantBuffer"),
    (UINT, "BaseConstantBufferOffset"),
    (UINT, "BaseTexture"),
    (UINT, "BaseSampler"),
    (BOOL, "Created"),
])

ID3D11ClassInstance.methods += [
    StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]),
    StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]),
    StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]),
]

ID3D11ClassLinkage.methods += [
    StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
    StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]

D3D11_FORMAT_SUPPORT = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_SO_BUFFER",
    "D3D11_FORMAT_SUPPORT_TEXTURE1D",
    "D3D11_FORMAT_SUPPORT_TEXTURE2D",
    "D3D11_FORMAT_SUPPORT_TEXTURE3D",
    "D3D11_FORMAT_SUPPORT_TEXTURECUBE",
    "D3D11_FORMAT_SUPPORT_SHADER_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
    "D3D11_FORMAT_SUPPORT_MIP",
    "D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
    "D3D11_FORMAT_SUPPORT_RENDER_TARGET",
    "D3D11_FORMAT_SUPPORT_BLENDABLE",
    "D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
    "D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
    "D3D11_FORMAT_SUPPORT_DISPLAY",
    "D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER",
    "D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
    "D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])

D3D11_FORMAT_SUPPORT2 = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])

D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
    (BOOL, "DriverConcurrentCreates"),
    (BOOL, "DriverCommandLists"),
])
D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
    (BOOL, "DoublePrecisionFloatShaderOps"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])
D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
    (BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])

D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
    ("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
    ("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
    ("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
    ("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
    ("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)

D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
    "D3D11_INPUT_PER_VERTEX_DATA",
    "D3D11_INPUT_PER_INSTANCE_DATA",
])

D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, ["D3D11_APPEND_ALIGNED_ELEMENT"])

D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (DXGI_FORMAT, "Format"),
    (UINT, "InputSlot"),
    (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"),
    (D3D11_INPUT_CLASSIFICATION, "InputSlotClass"),
    (UINT, "InstanceDataStepRate"),
])

# The 1..32 control-point patch values differ only in the embedded number,
# so they are generated instead of being spelled out one by one.
D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [
    "D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED",
    "D3D11_PRIMITIVE_TOPOLOGY_POINTLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_LINELIST",
    "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
    "D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
] + ["D3D11_PRIMITIVE_TOPOLOGY_%u_CONTROL_POINT_PATCHLIST" % n for n in range(1, 33)])

D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [
    "D3D11_PRIMITIVE_UNDEFINED",
    "D3D11_PRIMITIVE_POINT",
    "D3D11_PRIMITIVE_LINE",
    "D3D11_PRIMITIVE_TRIANGLE",
    "D3D11_PRIMITIVE_LINE_ADJ",
    "D3D11_PRIMITIVE_TRIANGLE_ADJ",
] + ["D3D11_PRIMITIVE_%u_CONTROL_POINT_PATCH" % n for n in range(1, 33)])

D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [
    (UINT, "Stream"),
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (BYTE, "StartComponent"),
    (BYTE, "ComponentCount"),
    (BYTE, "OutputSlot"),
])

D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [
    (FLOAT, "TopLeftX"),
    (FLOAT, "TopLeftY"),
    (FLOAT, "Width"),
    (FLOAT, "Height"),
    (FLOAT, "MinDepth"),
    (FLOAT, "MaxDepth"),
])

D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
    "D3D11_DEVICE_CONTEXT_IMMEDIATE",
    "D3D11_DEVICE_CONTEXT_DEFERRED",
])

ID3D11DeviceContext.methods += [
    StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
    StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
    StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
    StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
    StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
    StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]),
    StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
    StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
    StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
    StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
    StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
    StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
    StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
    StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
    StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Pointer(Const(UINT)), "pOffsets")]),
    StdMethod(Void, "DrawAuto", []),
    StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
    StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
    StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
    StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
    StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
    StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
    StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
    StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
    StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
    StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
    StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
    StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
    StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
    StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
    StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]),
    StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
    StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
    StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
    StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
    StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]),
    StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
    StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]),
    StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
    StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
    StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
    StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
    StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
    StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
    StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
    StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "ClearState", []),
    StdMethod(Void, "Flush", []),
    StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []),
    StdMethod(UINT, "GetContextFlags", []),
    StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]

ID3D11CommandList.methods += [
    StdMethod(UINT, "GetContextFlags", []),
]
(REFIID, \"riid\"), Out(Pointer(ObjPointer(Void)), \"ppvObj\")], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateDevice\", [DWORD, DWORD,", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ] D3D11_RASTERIZER_DESC = Struct(\"D3D11_RASTERIZER_DESC\", [ (D3D11_FILL_MODE,", "\"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture3D.methods +=", "(D3D11_TEX1D_SRV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_SRV, \"Texture1DArray\"), (D3D11_TEX2D_SRV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_SRV, \"Texture2DArray\"), (D3D11_TEX2DMS_SRV, \"Texture2DMS\"),", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"),", "]) D3D11_BLEND_DESC = Struct(\"D3D11_BLEND_DESC\", [ (BOOL, \"AlphaToCoverageEnable\"), (BOOL, \"IndependentBlendEnable\"), (Array(D3D11_RENDER_TARGET_BLEND_DESC,", "StdMethod(Void, \"VSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "\"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"), (UINT, \"NumEntries\"), (Array(Const(UINT), \"NumStrides\"), \"pBufferStrides\"), (UINT,", "Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\", \"D3D11_RESOURCE_DIMENSION_TEXTURE1D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE2D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE3D\", ]) D3D11_SRV_DIMENSION =", "ID3D11Query) ID3D11Counter = Interface(\"ID3D11Counter\", ID3D11Asynchronous) ID3D11ClassInstance = Interface(\"ID3D11ClassInstance\", ID3D11DeviceChild) ID3D11ClassLinkage", "\"DSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSSetShader\",", "\"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\", [(HANDLE,", "]) D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV =", "\"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH\", ]) D3D11_CULL_MODE = Enum(\"D3D11_CULL_MODE\", [ \"D3D11_CULL_NONE\",", "StdMethod(Void, \"SetEvictionPriority\", [(UINT, \"EvictionPriority\")]), StdMethod(UINT, \"GetEvictionPriority\", []), ] D3D11_BUFFER_DESC =", "StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), StdMethod(D3D_FEATURE_LEVEL, \"GetFeatureLevel\", []), StdMethod(D3D11_CREATE_DEVICE_FLAG,", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT", "Struct(\"D3D11_TEX2D_DSV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV = Struct(\"D3D11_TEX2D_ARRAY_DSV\", [ (UINT,", "StdMethod(Void, \"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]), StdMethod(Void, \"IASetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "\"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\",", "\"ppConstantBuffers\")]), StdMethod(Void, \"DSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]),", "= Interface(\"ID3D11Texture1D\", ID3D11Resource) ID3D11Texture2D = Interface(\"ID3D11Texture2D\", ID3D11Resource) ID3D11Texture3D = Interface(\"ID3D11Texture3D\",", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN #", "(BOOL, \"RestoreContextState\")]), StdMethod(Void, \"HSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"),", "D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\",", "publish, distribute, sublicense, and/or sell # copies of the Software,", "= Struct(\"D3D11_TEX2D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "[ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ]) D3D11_TEXTURE_ADDRESS_MODE = Enum(\"D3D11_TEXTURE_ADDRESS_MODE\", [ \"D3D11_TEXTURE_ADDRESS_WRAP\", \"D3D11_TEXTURE_ADDRESS_MIRROR\",", "\"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\",", "\"CheckFormatSupport\", [(DXGI_FORMAT, \"Format\"), Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]), StdMethod(HRESULT, \"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, \"Format\"), (UINT,", "to the following conditions: # # The above copyright notice", "\"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (ObjPointer(ID3D11Resource),", "\"OMSetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (UINT, \"UAVStartSlot\"),", "= MAKE_HRESULT([ \"D3D11_ERROR_FILE_NOT_FOUND\", \"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS\", \"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS\", \"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD\", \"D3DERR_INVALIDCALL\", \"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState", "\"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT,", "[(Blob(Const(Void), \"BytecodeLength\"), 
\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT,", "API(\"d3d11\") d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"),", "(UINT, \"StencilRef\")]), StdMethod(Void, \"SOSetTargets\", [(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppSOTargets\"), (Pointer(Const(UINT)),", "ID3D11View = Interface(\"ID3D11View\", ID3D11DeviceChild) ID3D11ShaderResourceView = Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView =", "\"Frequency\"), (BOOL, \"Disjoint\"), ]) D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"),", "StdMethod(Void, \"Unmap\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\")]), StdMethod(Void, \"PSSetConstantBuffers\", [(UINT, \"StartSlot\"),", "[Out(Pointer(UINT), \"pNumRects\"), Out(Array(D3D11_RECT, \"*pNumRects\"), \"pRects\")]), StdMethod(Void, \"HSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "\"ppImmediateContext\")]), StdMethod(HRESULT, \"SetExceptionMode\", [(D3D11_RAISE_FLAG, \"RaiseFlags\")]), StdMethod(UINT, \"GetExceptionMode\", []), ] d3d11", "StdMethod(HRESULT, \"CreateTexture2D\", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture2D)), \"ppTexture2D\")]), StdMethod(HRESULT, \"CreateTexture3D\",", "]) D3D11_TEX1D_ARRAY_SRV = Struct(\"D3D11_TEX1D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT,", "\"ArraySize\"), ]) D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV", "\"D3D11_RESOURCE_MISC_GENERATE_MIPS\", \"D3D11_RESOURCE_MISC_SHARED\", \"D3D11_RESOURCE_MISC_TEXTURECUBE\", \"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS\", \"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS\", \"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED\", \"D3D11_RESOURCE_MISC_RESOURCE_CLAMP\", \"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX\", \"D3D11_RESOURCE_MISC_GDI_COMPATIBLE\", ])", "D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\",", "(D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"), (D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None), ]) ID3D11DepthStencilView.methods", "Out(Pointer(ObjPointer(ID3D11Texture2D)), \"ppTexture2D\")]), StdMethod(HRESULT, \"CreateTexture3D\", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture3D)), \"ppTexture3D\")]),", "D3D11_TEX2DMS_DSV = Struct(\"D3D11_TEX2DMS_DSV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_DSV = Struct(\"D3D11_TEX2DMS_ARRAY_DSV\",", "\"CSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\",", "\"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\")]), StdMethod(HRESULT, \"CreateComputeShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"),", "\"ppConstantBuffers\")]), StdMethod(Void, \"DSGetShaderResources\", [(UINT, \"StartSlot\"), 
(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]),", "\"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS\", \"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD\", \"D3DERR_INVALIDCALL\", \"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild) ID3D11BlendState", "= Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_RTV,", "\"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV = Struct(\"D3D11_TEX2D_ARRAY_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"),", "# to use, copy, modify, merge, publish, distribute, sublicense, and/or", "(UINT, \"DstY\"), (UINT, \"DstZ\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (Pointer(Const(D3D11_BOX)), \"pSrcBox\")]),", "RECT) D3D11_BOX = Struct(\"D3D11_BOX\", [ (UINT, \"left\"), (UINT, \"top\"), (UINT,", "\"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSSetShader\", [(ObjPointer(ID3D11GeometryShader), \"pShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"),", "Pointer(D3D11_FEATURE_DATA_THREADING)), (\"D3D11_FEATURE_DOUBLES\", Pointer(D3D11_FEATURE_DATA_DOUBLES)), (\"D3D11_FEATURE_FORMAT_SUPPORT\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)), (\"D3D11_FEATURE_FORMAT_SUPPORT2\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)), (\"D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS\", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)), ],", "\"pRenderTargetView\"), (Array(Const(FLOAT), 4), \"ColorRGBA\")]), StdMethod(Void, \"ClearUnorderedAccessViewUint\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(UINT), 4),", "(UINT, \"ArraySize\"), ]) D3D11_DSV_FLAG = Flags(UINT, [ \"D3D11_DSV_READ_ONLY_DEPTH\", \"D3D11_DSV_READ_ONLY_STENCIL\", ])", "D3D11_TEX2D_SRV = Struct(\"D3D11_TEX2D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX2D_ARRAY_SRV", "StdMethod(HRESULT, \"CreateDomainShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11DomainShader)),", "[ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None), (Union(None, [(UINT, \"NumElements\"),", "\"pNumClassInstances\")]), StdMethod(Void, \"IAGetPrimitiveTopology\", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), \"pTopology\")]), StdMethod(Void, \"VSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "\"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"CSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"),", "\"D3D11_BIND_VERTEX_BUFFER\", \"D3D11_BIND_INDEX_BUFFER\", \"D3D11_BIND_CONSTANT_BUFFER\", \"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG", "= Struct(\"D3D11_RASTERIZER_DESC\", [ (D3D11_FILL_MODE, \"FillMode\"), (D3D11_CULL_MODE, \"CullMode\"), (BOOL, \"FrontCounterClockwise\"), (INT,", "* from d3dcommon import * from d3d11sdklayers import * HRESULT", "\"Software\"), to deal # in the Software without restriction, including", "]) D3D11_MAPPED_SUBRESOURCE = Struct(\"D3D11_MAPPED_SUBRESOURCE\", [ (OpaquePointer(Void), \"pData\"), (UINT, \"RowPitch\"), (UINT,", "\"CreatePixelShader\", 
[(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\")]),", "[ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV = Struct(\"D3D11_TEX2D_ARRAY_DSV\", [ (UINT, \"MipSlice\"),", "(Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"OMGetBlendState\", [Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\"), Out(Array(FLOAT, 4), \"BlendFactor\"),", "\"CSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"CSGetUnorderedAccessViews\",", "None), ]) ID3D11RenderTargetView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), \"pDesc\")]), ]", "\"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"),", "Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void,", "[(ObjPointer(ID3D11DomainShader), \"pDomainShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DSSetSamplers\", [(UINT,", "\"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\", [(UINT, \"StartSlot\"),", "Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]), StdMethod(HRESULT, \"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, \"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]),", "\"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ \"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\",", "\"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\", [", "[Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ] D3D11_BLEND = Enum(\"D3D11_BLEND\", [ \"D3D11_BLEND_ZERO\", \"D3D11_BLEND_ONE\", \"D3D11_BLEND_SRC_COLOR\",", "\"GetTypeName\", [Out(LPSTR, \"pTypeName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]), ] ID3D11ClassLinkage.methods += [ StdMethod(HRESULT,", "\"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL, \"DoublePrecisionFloatShaderOps\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT", "\"GetCreationFlags\", []), StdMethod(HRESULT, \"GetDeviceRemovedReason\", []), StdMethod(Void, \"GetImmediateContext\", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdMethod(HRESULT,", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "\"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE3D\", ]) D3D11_USAGE = Enum(\"D3D11_USAGE\", [ \"D3D11_USAGE_DEFAULT\", \"D3D11_USAGE_IMMUTABLE\",", "\"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", 
\"D3D11_UAV_DIMENSION_TEXTURE3D\", ]) D3D11_USAGE = Enum(\"D3D11_USAGE\", [ \"D3D11_USAGE_DEFAULT\", \"D3D11_USAGE_IMMUTABLE\", \"D3D11_USAGE_DYNAMIC\",", "(UINT64, \"PrimitivesStorageNeeded\"), ]) D3D11_COUNTER = Enum(\"D3D11_COUNTER\", [ \"D3D11_COUNTER_DEVICE_DEPENDENT_0\", ]) D3D11_COUNTER_TYPE", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_UAV_FLAG = Flags(UINT, [ \"D3D11_BUFFER_UAV_FLAG_RAW\",", "(Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)),", "]) ID3D11RenderTargetView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), \"pDesc\")]), ] D3D11_TEX1D_DSV", "\"MiscFlags\"), ]) ID3D11Texture2D.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE2D_DESC), \"pDesc\")]), ]", "\"VSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"PSGetShaderResources\",", "ID3D11DomainShader = Interface(\"ID3D11DomainShader\", ID3D11DeviceChild) ID3D11GeometryShader = Interface(\"ID3D11GeometryShader\", ID3D11DeviceChild) ID3D11PixelShader =", "(UINT, \"ArraySize\"), ]) D3D11_TEX3D_UAV = Struct(\"D3D11_TEX3D_UAV\", [ (UINT, \"MipSlice\"), (UINT,", "[(ObjPointer(ID3D11RenderTargetView), \"pRenderTargetView\"), (Array(Const(FLOAT), 4), \"ColorRGBA\")]), StdMethod(Void, \"ClearUnorderedAccessViewUint\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(UINT),", "XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set", "]) D3D11_UAV_DIMENSION = Enum(\"D3D11_UAV_DIMENSION\", [ \"D3D11_UAV_DIMENSION_UNKNOWN\", \"D3D11_UAV_DIMENSION_BUFFER\", \"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\",", "\"D3D11_STENCIL_OP_DECR\", ]) D3D11_DEPTH_STENCILOP_DESC = Struct(\"D3D11_DEPTH_STENCILOP_DESC\", [ (D3D11_STENCIL_OP, \"StencilFailOp\"), (D3D11_STENCIL_OP, \"StencilDepthFailOp\"),", "\"CreateBuffer\", [(Pointer(Const(D3D11_BUFFER_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Buffer)), \"ppBuffer\")]), StdMethod(HRESULT, \"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)),", "D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [ \"D3D11_CREATE_DEVICE_SINGLETHREADED\", \"D3D11_CREATE_DEVICE_DEBUG\", \"D3D11_CREATE_DEVICE_SWITCH_TO_REF\", \"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\", \"D3D11_CREATE_DEVICE_BGRA_SUPPORT\", ])", "granted, free of charge, to any person obtaining a copy", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSGetShader\", [Out(Pointer(ObjPointer(ID3D11PixelShader)),", "\"pDesc\")]), ] D3D11_FILTER = Enum(\"D3D11_FILTER\", [ \"D3D11_FILTER_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\",", "[ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2DMS_ARRAY_RTV =", "[(ObjPointer(ID3D11ShaderResourceView), \"pShaderResourceView\")]), StdMethod(Void, \"SetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\"), (FLOAT, \"MinLOD\")]), StdMethod(FLOAT, 
\"GetResourceMinLOD\",", "\"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"GSSetConstantBuffers\", [(UINT,", "[ \"D3D11_CREATE_DEVICE_SINGLETHREADED\", \"D3D11_CREATE_DEVICE_DEBUG\", \"D3D11_CREATE_DEVICE_SWITCH_TO_REF\", \"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\", \"D3D11_CREATE_DEVICE_BGRA_SUPPORT\", ]) ID3D11Device.methods += [", "StdMethod(Void, \"DrawIndexedInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"),", "(ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (DXGI_FORMAT, \"Format\")]), StdMethod(Void, \"ExecuteCommandList\", [(ObjPointer(ID3D11CommandList), \"pCommandList\"),", "[Out(Pointer(D3D11_RESOURCE_DIMENSION), \"pResourceDimension\")]), StdMethod(Void, \"SetEvictionPriority\", [(UINT, \"EvictionPriority\")]), StdMethod(UINT, \"GetEvictionPriority\", []), ]", "\"NumEntries\"), \"pSODeclaration\"), (UINT, \"NumEntries\"), (Array(Const(UINT), \"NumStrides\"), \"pBufferStrides\"), (UINT, \"NumStrides\"), (UINT,", "Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\", [ (UINT64, \"Frequency\"), (BOOL, \"Disjoint\"), ]) D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\",", "[ \"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\", \"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2D\", \"D3D11_DSV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2DMS\", \"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY\", ]) D3D11_RTV_DIMENSION", "(UINT, \"BaseConstantBufferOffset\"), (UINT, \"BaseTexture\"), (UINT, \"BaseSampler\"), (BOOL, \"Created\"), ]) ID3D11ClassInstance.methods", "(D3D11_TEX2DMS_ARRAY_RTV, \"Texture2DMSArray\"), (D3D11_TEX3D_RTV, \"Texture3D\"), ]), None), ]) ID3D11RenderTargetView.methods += [", "\"Stencil\")]), StdMethod(Void, \"GenerateMips\", [(ObjPointer(ID3D11ShaderResourceView), \"pShaderResourceView\")]), StdMethod(Void, \"SetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\"), (FLOAT,", "\"ppDepthStencilState\")]), StdMethod(HRESULT, \"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(HRESULT, \"CreateSamplerState\", [(Pointer(Const(D3D11_SAMPLER_DESC)),", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"),", "StdMethod(HRESULT, \"GetDeviceRemovedReason\", []), StdMethod(Void, \"GetImmediateContext\", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdMethod(HRESULT, \"SetExceptionMode\", [(D3D11_RAISE_FLAG,", "(BOOL, \"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL,", "\"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\", ]) D3D11_PRIMITIVE", "] D3D11_BUFFER_RTV = Struct(\"D3D11_BUFFER_RTV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]),", "\"pAsync\")]), StdMethod(HRESULT, \"GetData\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\"), Out(OpaqueBlob(Void, \"DataSize\"), \"pData\"), (UINT, \"DataSize\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSGetSamplers\", [(UINT,", "\"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST\",", "(UINT, \"ArraySize\"), ]) D3D11_TEX2D_UAV = Struct(\"D3D11_TEX2D_UAV\", [ (UINT, \"MipSlice\"), ])", "\"DataSize\"), \"pData\")]), StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), StdMethod(D3D_FEATURE_LEVEL, \"GetFeatureLevel\",", "\"NumClassInstances\")]), StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "\"ppImmediateContext\")]), StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"), (D3D11_CREATE_DEVICE_FLAG,", "StdMethod(Void, \"DSGetShader\", [Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void,", "\"ppShaderResourceViews\")]), StdMethod(Void, \"GSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]),", "(UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"),", "(UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), \"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"),", "+= [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ] D3D11_BLEND = Enum(\"D3D11_BLEND\",", "(D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)),", "(D3D11_TEX1D_RTV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_RTV, \"Texture1DArray\"), (D3D11_TEX2D_RTV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_RTV, \"Texture2DArray\"), (D3D11_TEX2DMS_RTV, \"Texture2DMS\"),", "\"ppShaderResourceViews\")]), StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"),", "(DXGI_FORMAT, \"Format\"), (UINT, \"Offset\")]), StdMethod(Void, \"DrawIndexedInstanced\", [(UINT, \"IndexCountPerInstance\"), (UINT, \"InstanceCount\"),", "[(ObjPointer(ID3D11RasterizerState), 
\"pRasterizerState\")]), StdMethod(Void, \"RSSetViewports\", [(UINT, \"NumViewports\"), (Array(Const(D3D11_VIEWPORT), \"NumViewports\"), \"pViewports\")]), StdMethod(Void,", "\"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE,", "(UINT, \"DepthPitch\"), ]) ID3D11Resource.methods += [ StdMethod(Void, \"GetType\", [Out(Pointer(D3D11_RESOURCE_DIMENSION), \"pResourceDimension\")]),", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "[]), ] d3d11 = API(\"d3d11\") d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"),", "(FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\",", "\"DrawIndexedInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT,", "Pointer(D3D11_FEATURE_DATA_DOUBLES)), (\"D3D11_FEATURE_FORMAT_SUPPORT\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)), (\"D3D11_FEATURE_FORMAT_SUPPORT2\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)), (\"D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS\", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)), ], Blob(Void, \"FeatureSupportDataSize\"),", "\"BorderColor\"), (FLOAT, \"MinLOD\"), (FLOAT, \"MaxLOD\"), ]) ID3D11SamplerState.methods += [ StdMethod(Void,", "= Struct(\"D3D11_TEX2DMS_ARRAY_DSV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_DSV_FLAG =", "(D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None), ]) ID3D11DepthStencilView.methods += [ StdMethod(Void, \"GetDesc\",", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [ \"D3D11_STANDARD_MULTISAMPLE_PATTERN\",", "]) D3D11_VIEWPORT = Struct(\"D3D11_VIEWPORT\", [ (FLOAT, \"TopLeftX\"), (FLOAT, \"TopLeftY\"), (FLOAT,", "ID3D11ShaderResourceView = Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView = Interface(\"ID3D11RenderTargetView\", ID3D11View) ID3D11DepthStencilView =", "[Out(Pointer(D3D11_BUFFER_DESC), \"pDesc\")]), ] D3D11_TEXTURE1D_DESC = Struct(\"D3D11_TEXTURE1D_DESC\", [ (UINT, \"Width\"), (UINT,", "(UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFER_UAV_FLAG, \"Flags\"), ]) D3D11_TEX1D_UAV = Struct(\"D3D11_TEX1D_UAV\",", "(OpaqueBlob(Const(Void), \"DataSize\"), \"pData\")]), StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), ]", "(Union(None, [ (D3D11_TEX1D_DSV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"),", "\"pDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"),", "StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT, \"CheckCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pDesc\"), Out(Pointer(D3D11_COUNTER_TYPE), \"pType\"),", "]), None), ]) ID3D11DepthStencilView.methods += [ StdMethod(Void, \"GetDesc\", 
[Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]),", "(UINT, \"ConstantBufferOffset\"), (UINT, \"ConstantVectorOffset\"), (UINT, \"TextureOffset\"), (UINT, \"SamplerOffset\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]),", "\"CInvocations\"), (UINT64, \"CPrimitives\"), (UINT64, \"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"), (UINT64,", "\"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT,", "d3d11sdklayers import * HRESULT = MAKE_HRESULT([ \"D3D11_ERROR_FILE_NOT_FOUND\", \"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS\", \"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS\", \"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD\",", "ID3D11View) ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader", "\"Subresource\")]), StdMethod(Void, \"PSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]),", "\"Offset\")]), StdMethod(Void, \"DrawIndexedInstanced\", [(UINT, \"IndexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartIndexLocation\"), (INT,", "\"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture1D.methods +=", "\"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [ \"D3D11_CREATE_DEVICE_SINGLETHREADED\", \"D3D11_CREATE_DEVICE_DEBUG\",", "] D3D11_FEATURE_DATA_THREADING = Struct(\"D3D11_FEATURE_DATA_THREADING\", [ (BOOL, \"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"), ])", "(UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"GetPredication\", [Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\"), Out(Pointer(BOOL),", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RASTERIZER_DESC), \"pDesc\")]), ] D3D11_SUBRESOURCE_DATA = Struct(\"D3D11_SUBRESOURCE_DATA\", [ (OpaquePointer(Const(Void)),", "\"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\", \"D3D11_RESOURCE_DIMENSION_TEXTURE1D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE2D\",", "StdMethod(Void, \"SOSetTargets\", [(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppSOTargets\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void,", "StdMethod(HRESULT, \"CreateDepthStencilView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\",", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "(Array(Const(FLOAT), 4), \"BlendFactor\"), (UINT, \"SampleMask\")]), StdMethod(Void, \"OMSetDepthStencilState\", [(ObjPointer(ID3D11DepthStencilState), \"pDepthStencilState\"), (UINT,", "(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)),", "Enum(\"D3D11_PRIMITIVE_TOPOLOGY\", [ 
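# Editorial note (a sketch, not part of the original spec): each resource or
# state object pairs a creation-time Struct with a GetDesc method that fills
# the same struct, so one description type serves both directions.  E.g. the
# D3D11_TEXTURE2D_DESC passed to ID3D11Device::CreateTexture2D is what
# ID3D11Texture2D::GetDesc later returns.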
\"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED\", \"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\",", "rights # to use, copy, modify, merge, publish, distribute, sublicense,", "[(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"RSSetState\", [(ObjPointer(ID3D11RasterizerState), \"pRasterizerState\")]), StdMethod(Void, \"RSSetViewports\",", "\"RSSetViewports\", [(UINT, \"NumViewports\"), (Array(Const(D3D11_VIEWPORT), \"NumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSSetScissorRects\", [(UINT, \"NumRects\"),", "\"ppConstantBuffers\")]), StdMethod(Void, \"PSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]),", "(D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture1D.methods += [", "\"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\",", "\"VSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"PSSetShaderResources\",", "\"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\",", "(D3D11_COMPARISON_FUNC, \"StencilFunc\"), ]) D3D11_DEPTH_STENCIL_DESC = Struct(\"D3D11_DEPTH_STENCIL_DESC\", [ (BOOL, \"DepthEnable\"), (D3D11_DEPTH_WRITE_MASK,", "False) ID3D11DeviceContext.methods += [ StdMethod(Void, \"VSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "\"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMSetRenderTargets\", [(UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumViews\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView),", "\"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)),", "[ \"D3D11_STENCIL_OP_KEEP\", \"D3D11_STENCIL_OP_ZERO\", \"D3D11_STENCIL_OP_REPLACE\", \"D3D11_STENCIL_OP_INCR_SAT\", \"D3D11_STENCIL_OP_DECR_SAT\", \"D3D11_STENCIL_OP_INVERT\", \"D3D11_STENCIL_OP_INCR\", \"D3D11_STENCIL_OP_DECR\", ])", "D3D11_TEX2D_ARRAY_UAV = Struct(\"D3D11_TEX2D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "\"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [ 
\"D3D11_CREATE_DEVICE_SINGLETHREADED\", \"D3D11_CREATE_DEVICE_DEBUG\", \"D3D11_CREATE_DEVICE_SWITCH_TO_REF\", \"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\",", "ID3D11DeviceChild) ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList = Interface(\"ID3D11CommandList\", ID3D11DeviceChild) ID3D11Device", "\"Created\"), ]) ID3D11ClassInstance.methods += [ StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void,", "\"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\", ]) D3D11_PRIMITIVE = Enum(\"D3D11_PRIMITIVE\", [", "= Enum(\"D3D11_TEXTURE_ADDRESS_MODE\", [ \"D3D11_TEXTURE_ADDRESS_WRAP\", \"D3D11_TEXTURE_ADDRESS_MIRROR\", \"D3D11_TEXTURE_ADDRESS_CLAMP\", \"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ]) D3D11_SAMPLER_DESC", "StdMethod(Void, \"OMSetRenderTargets\", [(UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumViews\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\")]), StdMethod(Void,", "\"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)),", "\"pAsync\")]), StdMethod(Void, \"End\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(HRESULT, \"GetData\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\"), Out(OpaqueBlob(Void,", "\"D3D11_BLEND_DEST_COLOR\", \"D3D11_BLEND_INV_DEST_COLOR\", \"D3D11_BLEND_SRC_ALPHA_SAT\", \"D3D11_BLEND_BLEND_FACTOR\", \"D3D11_BLEND_INV_BLEND_FACTOR\", \"D3D11_BLEND_SRC1_COLOR\", \"D3D11_BLEND_INV_SRC1_COLOR\", \"D3D11_BLEND_SRC1_ALPHA\", \"D3D11_BLEND_INV_SRC1_ALPHA\", ])", "\"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void, \"CSSetShader\", [(ObjPointer(ID3D11ComputeShader), \"pComputeShader\"),", "= Struct(\"D3D11_BUFFER_UAV\", [ (UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFER_UAV_FLAG, \"Flags\"), ])", "\"DestBlend\"), (D3D11_BLEND_OP, \"BlendOp\"), (D3D11_BLEND, \"SrcBlendAlpha\"), (D3D11_BLEND, \"DestBlendAlpha\"), (D3D11_BLEND_OP, \"BlendOpAlpha\"), (UINT8,", "# # The above copyright notice and this permission notice", "\"D3D11_DSV_READ_ONLY_STENCIL\", ]) D3D11_DEPTH_STENCIL_VIEW_DESC = Struct(\"D3D11_DEPTH_STENCIL_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_DSV_DIMENSION, \"ViewDimension\"),", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_SRV = Struct(\"D3D11_TEX3D_SRV\", [ (UINT,", "(Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"PSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)),", "[(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"), Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\")]), StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]),", "\"D3D11_FILL_SOLID\", ]) D3D11_PRIMITIVE_TOPOLOGY = 
Enum(\"D3D11_PRIMITIVE_TOPOLOGY\", [ \"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED\", \"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\",", "]) D3D11_FILL_MODE = Enum(\"D3D11_FILL_MODE\", [ \"D3D11_FILL_WIREFRAME\", \"D3D11_FILL_SOLID\", ]) D3D11_PRIMITIVE_TOPOLOGY =", "Struct(\"D3D11_TEX1D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_UAV", "\"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"), (FLOAT, \"MipLODBias\"), (UINT, \"MaxAnisotropy\"), (D3D11_COMPARISON_FUNC, \"ComparisonFunc\"), (Array(FLOAT,", "(Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"DrawAuto\", []), StdMethod(Void, \"DrawIndexedInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT,", "\"D3D11CoreCreateDevice\", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD],", "]) D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\", [ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ]) D3D11_TEXTURE_ADDRESS_MODE =", "(UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE\", ]) ID3D11Asynchronous.methods +=", "[Out(LPSTR, \"pInstanceName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]), StdMethod(Void, \"GetTypeName\", [Out(LPSTR, \"pTypeName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]),", "\"pShaderResourceView\")]), StdMethod(Void, \"SetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\"), (FLOAT, \"MinLOD\")]), StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource),", "\"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"CSGetSamplers\",", "copies of the Software, and to permit persons to whom", "\"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\")]), StdMethod(HRESULT,", "= Struct(\"D3D11_TEX1D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX1D_ARRAY_SRV =", "Flags(UINT, [ \"D3D11_MAP_FLAG_DO_NOT_WAIT\", ]) D3D11_RAISE_FLAG = Flags(UINT, [ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ])", "= Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader = Interface(\"ID3D11HullShader\",", "\"FillMode\"), (D3D11_CULL_MODE, \"CullMode\"), (BOOL, \"FrontCounterClockwise\"), (INT, \"DepthBias\"), (FLOAT, \"DepthBiasClamp\"), (FLOAT,", "Out(Pointer(DXGI_FORMAT), \"Format\"), Out(Pointer(UINT), \"Offset\")]), StdMethod(Void, \"GSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "]) D3D11_DEPTH_STENCIL_DESC = Struct(\"D3D11_DEPTH_STENCIL_DESC\", [ (BOOL, \"DepthEnable\"), (D3D11_DEPTH_WRITE_MASK, \"DepthWriteMask\"), 
(D3D11_COMPARISON_FUNC,", "\"BufferEx\"), ]), None), ]) ID3D11ShaderResourceView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC),", "[Out(Pointer(D3D11_SAMPLER_DESC), \"pDesc\")]), ] D3D11_FORMAT_SUPPORT = Flags(UINT, [ \"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\",", "StdMethod(Void, \"CopyResource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\")]), StdMethod(Void, \"UpdateSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"),", "Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"ppPredicate\"), Out(Pointer(BOOL), \"pPredicateValue\")]), StdMethod(Void, \"GSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView),", "\"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\")]), StdMethod(HRESULT, \"CreateClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(HRESULT,", "\"PSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSSetShader\",", "\"OMGetDepthStencilState\", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\"), Out(Pointer(UINT), \"pStencilRef\")]), StdMethod(Void, \"SOGetTargets\", [(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer),", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "]) D3D11_USAGE = Enum(\"D3D11_USAGE\", [ \"D3D11_USAGE_DEFAULT\", \"D3D11_USAGE_IMMUTABLE\", \"D3D11_USAGE_DYNAMIC\", \"D3D11_USAGE_STAGING\", ])", "StdMethod(HRESULT, \"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"), Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\")]), StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"),", "(UINT64, \"CSInvocations\"), ]) D3D11_QUERY_DATA_SO_STATISTICS = Struct(\"D3D11_QUERY_DATA_SO_STATISTICS\", [ (UINT64, \"NumPrimitivesWritten\"), (UINT64,", "(BOOL, \"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL, \"DoublePrecisionFloatShaderOps\"), ])", "(UINT, \"First2DArrayFace\"), (UINT, \"NumCubes\"), ]) D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT,", "[ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ]) ID3D11View.methods +=", "D3D11_STENCIL_OP = Enum(\"D3D11_STENCIL_OP\", [ \"D3D11_STENCIL_OP_KEEP\", \"D3D11_STENCIL_OP_ZERO\", \"D3D11_STENCIL_OP_REPLACE\", \"D3D11_STENCIL_OP_INCR_SAT\", \"D3D11_STENCIL_OP_DECR_SAT\", \"D3D11_STENCIL_OP_INVERT\",", "[Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [ \"D3D11_STANDARD_MULTISAMPLE_PATTERN\", \"D3D11_CENTER_MULTISAMPLE_PATTERN\", ])", "] ID3D11CommandList.methods += [ StdMethod(UINT, \"GetContextFlags\", []), ] D3D11_FEATURE_DATA_THREADING =", "(UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"), (UINT, 
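# Editorial note (a sketch, not part of the original spec): the view
# descriptions above use Union(None, [...]) to model the anonymous C unions
# inside the D3D11_*_VIEW_DESC structures.  Which member is meaningful is
# selected by the sibling ViewDimension field; e.g. when ViewDimension ==
# D3D11_RTV_DIMENSION_TEXTURE2D, only the Texture2D (D3D11_TEX2D_RTV) member
# of D3D11_RENDER_TARGET_VIEW_DESC is read.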
\"StartInstanceLocation\")]), StdMethod(Void, \"GSSetConstantBuffers\", [(UINT, \"StartSlot\"),", "[ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT,", "StdMethod(Void, \"CSGetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void,", "\"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"IAGetPrimitiveTopology\", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), \"pTopology\")]), StdMethod(Void, \"VSGetShaderResources\",", "(UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSGetShader\", [Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\"), Out(Array(ObjPointer(ID3D11ClassInstance),", "(UINT64, \"CInvocations\"), (UINT64, \"CPrimitives\"), (UINT64, \"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"),", "ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader =", "= Struct(\"D3D11_TEXTURE1D_DESC\", [ (UINT, \"Width\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT,", "(DXGI_FORMAT, \"Format\"), (D3D11_DSV_DIMENSION, \"ViewDimension\"), (D3D11_DSV_FLAG, \"Flags\"), (Union(None, [ (D3D11_TEX1D_DSV, \"Texture1D\"),", "\"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ]) D3D11_SAMPLER_DESC = Struct(\"D3D11_SAMPLER_DESC\", [ (D3D11_FILTER, \"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE,", "[DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True),", "\"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"), ]) D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic(\"D3D11_FEATURE\", \"Feature\", [ (\"D3D11_FEATURE_THREADING\", Pointer(D3D11_FEATURE_DATA_THREADING)),", "\"D3D11_PRIMITIVE_LINE\", \"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\", \"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH\",", "(REFIID, \"ReturnedInterface\"), Out(Pointer(ObjPointer(Void)), \"ppResource\")]), StdMethod(HRESULT, \"CheckFormatSupport\", [(DXGI_FORMAT, \"Format\"), Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]),", "= Flags(UINT, [ \"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\", ]) D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [", "\"D3D11_STENCIL_OP_INCR_SAT\", \"D3D11_STENCIL_OP_DECR_SAT\", \"D3D11_STENCIL_OP_INVERT\", \"D3D11_STENCIL_OP_INCR\", \"D3D11_STENCIL_OP_DECR\", ]) D3D11_DEPTH_STENCILOP_DESC = Struct(\"D3D11_DEPTH_STENCILOP_DESC\", [", "[(Pointer(Const(D3D11_TEXTURE2D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture2D)), \"ppTexture2D\")]), StdMethod(HRESULT, \"CreateTexture3D\", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), \"pDesc\"),", "\"pResource\"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), \"ppSRView\")]), StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)),", "StdMethod(HRESULT, \"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, 
\"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\",", "\"InstanceDataStepRate\"), ]) D3D11_FILL_MODE = Enum(\"D3D11_FILL_MODE\", [ \"D3D11_FILL_WIREFRAME\", \"D3D11_FILL_SOLID\", ]) D3D11_PRIMITIVE_TOPOLOGY", "\"ppvObj\")], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateDevice\", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD,", "(UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_DSV = Struct(\"D3D11_TEX1D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT,", "\"pBufferLength\")]), ] ID3D11ClassLinkage.methods += [ StdMethod(HRESULT, \"GetClassInstance\", [(LPCSTR, \"pClassInstanceName\"), (UINT,", "\"pSrcResource\"), (UINT, \"SrcSubresource\"), (DXGI_FORMAT, \"Format\")]), StdMethod(Void, \"ExecuteCommandList\", [(ObjPointer(ID3D11CommandList), \"pCommandList\"), (BOOL,", "(D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"), (D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT,", "ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild) ID3D11BlendState = Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState =", "D3D11_COUNTER = Enum(\"D3D11_COUNTER\", [ \"D3D11_COUNTER_DEVICE_DEPENDENT_0\", ]) D3D11_COUNTER_TYPE = Enum(\"D3D11_COUNTER_TYPE\", [", "(UINT, \"NumClassInstances\")]), StdMethod(Void, \"DrawIndexed\", [(UINT, \"IndexCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\")]),", "copy, modify, merge, publish, distribute, sublicense, and/or sell # copies", "\"pDepthStencilState\"), (UINT, \"StencilRef\")]), StdMethod(Void, \"SOSetTargets\", [(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppSOTargets\"),", "\"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2DMS_ARRAY_RTV = Struct(\"D3D11_TEX2DMS_ARRAY_RTV\", [", "\"GetEvictionPriority\", []), ] D3D11_BUFFER_DESC = Struct(\"D3D11_BUFFER_DESC\", [ (UINT, \"ByteWidth\"), (D3D11_USAGE,", "\"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH\",", "(UINT, \"MipLevels\"), (UINT, \"First2DArrayFace\"), (UINT, \"NumCubes\"), ]) D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\",", "(SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\")]), StdMethod(HRESULT, \"CreateClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]),", "(Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppVertexBuffers\"), (Pointer(Const(UINT)), \"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"IASetIndexBuffer\", [(ObjPointer(ID3D11Buffer),", "\"GetClassInstance\", [(LPCSTR, \"pClassInstanceName\"), (UINT, \"InstanceIndex\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), StdMethod(HRESULT, \"CreateClassInstance\", [(LPCSTR,", "(D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), (UINT, \"StructureByteStride\"), ]) ID3D11Buffer.methods", "\"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\",", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"Begin\", [(ObjPointer(ID3D11Asynchronous),", "\"First2DArrayFace\"), (UINT, \"NumCubes\"), ]) D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"),", "software and associated documentation files (the \"Software\"), to deal #", "\"guid\"), Out(Pointer(UINT), \"pDataSize\"), Out(OpaquePointer(Void), \"pData\")]), StdMethod(HRESULT, \"SetPrivateData\", [(REFGUID, \"guid\"), (UINT,", "\"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), ] D3D11_COMPARISON_FUNC = Enum(\"D3D11_COMPARISON_FUNC\", [ \"D3D11_COMPARISON_NEVER\", \"D3D11_COMPARISON_LESS\",", "\"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture3D.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE3D_DESC),", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSSetShader\", [(ObjPointer(ID3D11HullShader), \"pHullShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT,", "= Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\",", "\"Query\"), (D3D11_QUERY_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Query.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC),", "(UINT, \"BaseTexture\"), (UINT, \"BaseSampler\"), (BOOL, \"Created\"), ]) ID3D11ClassInstance.methods += [", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR", "(DXGI_FORMAT, \"Format\"), (UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"), (D3D11_INPUT_CLASSIFICATION, \"InputSlotClass\"), (UINT, \"InstanceDataStepRate\"),", "D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\", \"D3D11_RESOURCE_DIMENSION_TEXTURE1D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE2D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE3D\", ])", "the Software without restriction, including without limitation the rights #", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "(BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RASTERIZER_DESC), \"pDesc\")]),", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SAMPLER_DESC), \"pDesc\")]), ] D3D11_FORMAT_SUPPORT = Flags(UINT, [", "\"CopySubresourceRegion\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (UINT, \"DstX\"), (UINT, \"DstY\"), (UINT,", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "= Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader = Interface(\"ID3D11HullShader\", ID3D11DeviceChild) ID3D11DomainShader = Interface(\"ID3D11DomainShader\",", "Struct(\"D3D11_TEX1D_ARRAY_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_RTV", "Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY = Enum(\"D3D11_QUERY\", [ \"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\",", "\"hResource\"), (REFIID, \"ReturnedInterface\"), Out(Pointer(ObjPointer(Void)), \"ppResource\")]), StdMethod(HRESULT, \"CheckFormatSupport\", [(DXGI_FORMAT, \"Format\"), Out(Pointer(D3D11_FORMAT_SUPPORT),", "\"GetDataFlags\")]), StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"), (BOOL, \"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\", [(UINT,", "(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "\"ppShaderResourceViews\")]), StdMethod(Void, \"GSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]),", "\"pViewports\")]), StdMethod(Void, \"RSSetScissorRects\", [(UINT, \"NumRects\"), (Array(Const(D3D11_RECT), \"NumRects\"), \"pRects\")]), StdMethod(Void, \"CopySubresourceRegion\",", "\"PSSetShader\", [(ObjPointer(ID3D11PixelShader), \"pPixelShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"PSSetSamplers\",", "Flags(UINT, [ \"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_SO_BUFFER\", \"D3D11_FORMAT_SUPPORT_TEXTURE1D\", \"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\",", "\"ppPixelShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetSamplers\", [(UINT, \"StartSlot\"),", "\"pRects\")]), StdMethod(Void, \"HSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]),", "\"pUnorderedAccessView\"), 
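# Editorial note (a sketch, not part of the original spec): EnumPolymorphic
# couples each D3D11_FEATURE value with the pointer type of the struct that
# ID3D11Device::CheckFeatureSupport (declared further below) fills for it,
# e.g.
#
#     CheckFeatureSupport(D3D11_FEATURE_THREADING, &threading, sizeof threading)
#
# writes a D3D11_FEATURE_DATA_THREADING; the trailing Blob(Void,
# "FeatureSupportDataSize") is the fallback typing for unrecognized feature
# codes.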
ID3D11DeviceContext.methods += [
    StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
    StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
    StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
    StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
    StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
    StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]),
    StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
    StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
    StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
    StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
    StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
    StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
    StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
    StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
    StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Pointer(Const(UINT)), "pOffsets")]),
    StdMethod(Void, "DrawAuto", []),
    StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
    StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
    StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
    StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
    StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
    StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
    StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
    StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
    StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
    StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
    StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
    StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
    StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
    StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
    StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]),
    StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
    StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
    StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
    StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
    StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]),
    StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
    StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]),
    StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
    StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
    StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
    StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
    StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
    StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
    StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
    StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "ClearState", []),
    StdMethod(Void, "Flush", []),
    StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []),
    StdMethod(UINT, "GetContextFlags", []),
    StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]

D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [
    "D3D11_CREATE_DEVICE_SINGLETHREADED",
    "D3D11_CREATE_DEVICE_DEBUG",
    "D3D11_CREATE_DEVICE_SWITCH_TO_REF",
    "D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
    "D3D11_CREATE_DEVICE_BGRA_SUPPORT",
])

ID3D11Device.methods += [
    StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D11_BUFFER_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Buffer)), "ppBuffer")]),
    StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture1D)), "ppTexture1D")]),
    StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture2D)), "ppTexture2D")]),
    StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture3D)), "ppTexture3D")]),
    StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), "ppSRView")]),
    StdMethod(HRESULT, "CreateUnorderedAccessView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), "ppUAView")]),
    StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), "ppRTView")]),
    StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
    StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader")]),
    StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
    StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (Array(Const(UINT), "NumStrides"), "pBufferStrides"), (UINT, "NumStrides"), (UINT, "RasterizedStream"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
    StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader")]),
    StdMethod(HRESULT, "CreateHullShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader")]),
    StdMethod(HRESULT, "CreateDomainShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader")]),
    StdMethod(HRESULT, "CreateComputeShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader")]),
    StdMethod(HRESULT, "CreateClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
    StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D11_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState")]),
    StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState")]),
    StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D11_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
    StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D11_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D11SamplerState)), "ppSamplerState")]),
    StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D11_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D11Query)), "ppQuery")]),
    StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D11_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate")]),
    StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D11Counter)), "ppCounter")]),
    StdMethod(HRESULT, "CreateDeferredContext", [(UINT, "ContextFlags"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppDeferredContext")]),
    StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]),
    StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D11_FORMAT_SUPPORT), "pFormatSupport")]),
    StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")]),
    StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D11_COUNTER_INFO), "pCounterInfo")]),
    StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D11_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")]),
    StdMethod(HRESULT, "CheckFeatureSupport", [(D3D11_FEATURE, "Feature"), Out(D3D11_FEATURE_DATA, "pFeatureSupportData"), (UINT, "FeatureSupportDataSize")]),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
    StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
    StdMethod(D3D_FEATURE_LEVEL, "GetFeatureLevel", []),
    StdMethod(UINT, "GetCreationFlags", []),
    StdMethod(HRESULT, "GetDeviceRemovedReason", []),
    StdMethod(Void, "GetImmediateContext", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
    StdMethod(HRESULT, "SetExceptionMode", [(D3D11_RAISE_FLAG, "RaiseFlags")]),
    StdMethod(UINT, "GetExceptionMode", []),
]

d3d11 = API("d3d11")

d3d11.addFunctions([
    StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
    StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),

    # XXX: Undocumented functions, called by d3d11sdklayers.dll when
    # D3D11_CREATE_DEVICE_DEBUG is set.
    StdFunction(HRESULT, "D3D11CoreRegisterLayers", [LPCVOID, DWORD], internal=True),
    StdFunction(SIZE_T, "D3D11CoreGetLayeredDeviceSize", [LPCVOID, DWORD], internal=True),
    StdFunction(HRESULT, "D3D11CoreCreateLayeredDevice", [LPCVOID, DWORD, LPCVOID, REFIID, Out(Pointer(ObjPointer(Void)), "ppvObj")], internal=True),
    StdFunction(HRESULT, "D3D11CoreCreateDevice", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True),
])
\"GetContextFlags\", []), ] D3D11_FEATURE_DATA_THREADING = Struct(\"D3D11_FEATURE_DATA_THREADING\", [", "\"pOffsets\")]), StdMethod(Void, \"IASetIndexBuffer\", [(ObjPointer(ID3D11Buffer), \"pIndexBuffer\"), (DXGI_FORMAT, \"Format\"), (UINT, \"Offset\")]), StdMethod(Void,", "\"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\", \"D3D11_RESOURCE_DIMENSION_TEXTURE1D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE2D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE3D\", ]) D3D11_SRV_DIMENSION = Enum(\"D3D11_SRV_DIMENSION\", [", "StdMethod(UINT, \"GetContextFlags\", []), ] D3D11_FEATURE_DATA_THREADING = Struct(\"D3D11_FEATURE_DATA_THREADING\", [ (BOOL, \"DriverConcurrentCreates\"),", "\"GetDesc\", [Out(Pointer(D3D11_SAMPLER_DESC), \"pDesc\")]), ] D3D11_FORMAT_SUPPORT = Flags(UINT, [ \"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\",", "\"MiscFlags\"), ]) ID3D11Texture1D.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE1D_DESC), \"pDesc\")]), ]", "\"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_RTV, \"Texture2DMSArray\"), (D3D11_TEX3D_RTV, \"Texture3D\"), ]), None), ]) ID3D11RenderTargetView.methods +=", "D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT, \"OutFormatSupport\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT2", "\"D3D11_ERROR_FILE_NOT_FOUND\", \"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS\", \"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS\", \"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD\", \"D3DERR_INVALIDCALL\", \"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\",", "\"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH\",", "StdMethod(Void, \"VSSetShader\", [(ObjPointer(ID3D11VertexShader), \"pVertexShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void,", "* from d3d11sdklayers import * HRESULT = MAKE_HRESULT([ \"D3D11_ERROR_FILE_NOT_FOUND\", \"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS\",", "\"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\",", "+= [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_RTV = Struct(\"D3D11_BUFFER_RTV\",", "Out(Pointer(UINT), \"pDescriptionLength\")]), StdMethod(HRESULT, \"CheckFeatureSupport\", [(D3D11_FEATURE, \"Feature\"), Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT, \"FeatureSupportDataSize\")]),", "D3D11_UAV_DIMENSION = Enum(\"D3D11_UAV_DIMENSION\", [ \"D3D11_UAV_DIMENSION_UNKNOWN\", \"D3D11_UAV_DIMENSION_BUFFER\", 
\"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\",", "be included in # all copies or substantial portions of", "\"D3D11_BLEND_OP_MAX\", ]) D3D11_COLOR_WRITE_ENABLE = Enum(\"D3D11_COLOR_WRITE_ENABLE\", [ \"D3D11_COLOR_WRITE_ENABLE_ALL\", \"D3D11_COLOR_WRITE_ENABLE_RED\", \"D3D11_COLOR_WRITE_ENABLE_GREEN\", \"D3D11_COLOR_WRITE_ENABLE_BLUE\",", "[(Pointer(Const(D3D11_TEXTURE3D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture3D)), \"ppTexture3D\")]), StdMethod(HRESULT, \"CreateShaderResourceView\", [(ObjPointer(ID3D11Resource), \"pResource\"),", "[(Pointer(Const(D3D11_SAMPLER_DESC)), \"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)), \"ppSamplerState\")]), StdMethod(HRESULT, \"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)), \"ppQuery\")]),", "\"ppShaderResourceViews\")]), StdMethod(Void, \"PSGetShader\", [Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]),", "\"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"), (SIZE_T, \"BytecodeLength\"), Out(Pointer(ObjPointer(ID3D11InputLayout)),", "D3D11_CLEAR_FLAG = Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ]) D3D11_RECT = Alias(\"D3D11_RECT\",", "\"ElementOffset\")]), None), (Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_TEX1D_RTV", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ] D3D11_BLEND = Enum(\"D3D11_BLEND\", [", "\"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"),", "[Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdMethod(HRESULT, \"SetExceptionMode\", [(D3D11_RAISE_FLAG, \"RaiseFlags\")]), StdMethod(UINT, \"GetExceptionMode\", []), ]", "(Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), \"RenderTarget\"), ]) ID3D11BlendState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC),", "(D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]), StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"), (BOOL, \"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\",", "[(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]),", "\"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSGetShader\", [Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT),", "(UINT, \"DstSubresource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (DXGI_FORMAT, \"Format\")]), StdMethod(Void, \"ExecuteCommandList\",", "(UINT, \"left\"), (UINT, \"top\"), (UINT, \"front\"), (UINT, \"right\"), (UINT, \"bottom\"),", "\"ppBlendState\"), Out(Array(FLOAT, 4), \"BlendFactor\"), Out(Pointer(UINT), \"pSampleMask\")]), StdMethod(Void, \"OMGetDepthStencilState\", 
[Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\"),", "Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\")]), StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\",", "None), ]) D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [ \"D3D11_BUFFEREX_SRV_FLAG_RAW\", ]) D3D11_BUFFEREX_SRV =", "[(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None), (Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]),", "D3D11_TEX1D_ARRAY_RTV = Struct(\"D3D11_TEX1D_ARRAY_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "\"D3D11_USAGE_DEFAULT\", \"D3D11_USAGE_IMMUTABLE\", \"D3D11_USAGE_DYNAMIC\", \"D3D11_USAGE_STAGING\", ]) D3D11_BIND_FLAG = Flags(UINT, [ \"D3D11_BIND_VERTEX_BUFFER\",", "(FLOAT, \"DepthBiasClamp\"), (FLOAT, \"SlopeScaledDepthBias\"), (BOOL, \"DepthClipEnable\"), (BOOL, \"ScissorEnable\"), (BOOL, \"MultisampleEnable\"),", "\"pData\"), (UINT, \"RowPitch\"), (UINT, \"DepthPitch\"), ]) ID3D11Resource.methods += [ StdMethod(Void,", "Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(HRESULT, \"CreateSamplerState\", [(Pointer(Const(D3D11_SAMPLER_DESC)), \"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)), \"ppSamplerState\")]), StdMethod(HRESULT, \"CreateQuery\",", "without restriction, including without limitation the rights # to use,", "# # Permission is hereby granted, free of charge, to", "\"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)),", "D3D11_CULL_MODE = Enum(\"D3D11_CULL_MODE\", [ \"D3D11_CULL_NONE\", \"D3D11_CULL_FRONT\", \"D3D11_CULL_BACK\", ]) D3D11_SO_DECLARATION_ENTRY =", "\"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"IAGetPrimitiveTopology\",", "subject to the following conditions: # # The above copyright", "Struct(\"D3D11_DEPTH_STENCIL_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_DSV_DIMENSION, \"ViewDimension\"), (D3D11_DSV_FLAG, \"Flags\"), (Union(None, [", "\"MipSlice\"), ]) D3D11_TEX1D_ARRAY_UAV = Struct(\"D3D11_TEX1D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"),", "ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader = Interface(\"ID3D11HullShader\", ID3D11DeviceChild) ID3D11DomainShader", "All Rights Reserved. 
# # Permission is hereby granted, free", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_RTV = Struct(\"D3D11_BUFFER_RTV\", [", "[ \"D3D11_BUFFEREX_SRV_FLAG_RAW\", ]) D3D11_BUFFEREX_SRV = Struct(\"D3D11_BUFFEREX_SRV\", [ (UINT, \"FirstElement\"), (UINT,", "\"GetDesc\", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_RTV = Struct(\"D3D11_BUFFER_RTV\", [ (Union(None, [(UINT,", "\"ArraySize\"), ]) D3D11_TEX2D_SRV = Struct(\"D3D11_TEX2D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"),", "D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER, \"LastDeviceDependentCounter\"), (UINT, \"NumSimultaneousCounters\"), (UINT8, \"NumDetectableParallelUnits\"),", "]) ID3D11ClassInstance.methods += [ StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\",", "= Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL, \"DoublePrecisionFloatShaderOps\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [", "\"D3D11_FORMAT_SUPPORT_TEXTURE1D\", \"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\",", "\"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(HRESULT, \"CreateSamplerState\", [(Pointer(Const(D3D11_SAMPLER_DESC)), \"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)),", "\"ppBlendState\")]), StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\")]), StdMethod(HRESULT, \"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)),", "[ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_DSV = Struct(\"D3D11_TEX1D_ARRAY_DSV\", [ (UINT, \"MipSlice\"),", "\"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\",", "\"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState),", "]) D3D11_DEPTH_STENCILOP_DESC = Struct(\"D3D11_DEPTH_STENCILOP_DESC\", [ (D3D11_STENCIL_OP, \"StencilFailOp\"), (D3D11_STENCIL_OP, \"StencilDepthFailOp\"), (D3D11_STENCIL_OP,", "(UINT, \"InstanceId\"), (UINT, \"InstanceIndex\"), (UINT, \"TypeId\"), (UINT, \"ConstantBuffer\"), (UINT, \"BaseConstantBufferOffset\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSSetConstantBuffers\", [(UINT,", "[(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(HRESULT, \"GetData\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\"), Out(OpaqueBlob(Void, 
\"DataSize\"), \"pData\"), (UINT,", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\", [ (UINT64,", "\"pRasterizerState\")]), StdMethod(Void, \"RSSetViewports\", [(UINT, \"NumViewports\"), (Array(Const(D3D11_VIEWPORT), \"NumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSSetScissorRects\",", "]), None), ]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), \"pDesc\")]),", "Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER, \"Counter\"), (UINT, \"MiscFlags\"), ]) D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\",", "StdMethod(Void, \"DrawIndexedInstanced\", [(UINT, \"IndexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\"),", "\"ppShaderResourceViews\")]), StdMethod(Void, \"VSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]),", "Enum(\"D3D11_FILTER\", [ \"D3D11_FILTER_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\",", "\"Map\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\"), (D3D11_MAP, \"MapType\"), (D3D11_MAP_FLAG, \"MapFlags\"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE),", "Enum(\"D3D11_USAGE\", [ \"D3D11_USAGE_DEFAULT\", \"D3D11_USAGE_IMMUTABLE\", \"D3D11_USAGE_DYNAMIC\", \"D3D11_USAGE_STAGING\", ]) D3D11_BIND_FLAG = Flags(UINT,", "(UINT, \"Subresource\"), (D3D11_MAP, \"MapType\"), (D3D11_MAP_FLAG, \"MapFlags\"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), \"pMappedResource\")]), StdMethod(Void, \"Unmap\",", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL", "\"ppComputeShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"CSGetSamplers\", [(UINT, \"StartSlot\"),", "StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BUFFER_DESC), \"pDesc\")]), ] D3D11_TEXTURE1D_DESC = Struct(\"D3D11_TEXTURE1D_DESC\", [ (UINT,", "= Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER, \"Counter\"), (UINT, \"MiscFlags\"), ]) D3D11_COUNTER_INFO =", "\"IAPrimitives\"), (UINT64, \"VSInvocations\"), (UINT64, \"GSInvocations\"), (UINT64, \"GSPrimitives\"), (UINT64, \"CInvocations\"), (UINT64,", "\"D3D11_COUNTER_TYPE_FLOAT32\", \"D3D11_COUNTER_TYPE_UINT16\", \"D3D11_COUNTER_TYPE_UINT32\", \"D3D11_COUNTER_TYPE_UINT64\", ]) D3D11_COUNTER_DESC = Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER,", "\"DstY\"), (UINT, \"DstZ\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (Pointer(Const(D3D11_BOX)), \"pSrcBox\")]), StdMethod(Void,", "\"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), \"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"),", "\"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ] D3D11_RASTERIZER_DESC = Struct(\"D3D11_RASTERIZER_DESC\", [ (D3D11_FILL_MODE, \"FillMode\"),", "ID3D11Predicate = Interface(\"ID3D11Predicate\", ID3D11Query) ID3D11Counter = Interface(\"ID3D11Counter\", ID3D11Asynchronous) ID3D11ClassInstance =", "\"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMSetRenderTargets\", [(UINT, \"NumViews\"),", "\"SampleMask\")]), StdMethod(Void, \"OMSetDepthStencilState\", [(ObjPointer(ID3D11DepthStencilState), \"pDepthStencilState\"), (UINT, \"StencilRef\")]), StdMethod(Void, \"SOSetTargets\", [(UINT,", "]) D3D11_COUNTER_TYPE = Enum(\"D3D11_COUNTER_TYPE\", [ \"D3D11_COUNTER_TYPE_FLOAT32\", \"D3D11_COUNTER_TYPE_UINT16\", \"D3D11_COUNTER_TYPE_UINT32\", \"D3D11_COUNTER_TYPE_UINT64\", ])", "(UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_DSV = Struct(\"D3D11_TEX2DMS_ARRAY_DSV\", [ (UINT, \"FirstArraySlice\"), (UINT,", "Out(Pointer(UINT), \"pOffsets\")]), StdMethod(Void, \"IAGetIndexBuffer\", [Out(Pointer(ObjPointer(ID3D11Buffer)), \"pIndexBuffer\"), Out(Pointer(DXGI_FORMAT), \"Format\"), Out(Pointer(UINT), \"Offset\")]),", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "\"NumStrides\"), \"pBufferStrides\"), (UINT, \"NumStrides\"), (UINT, \"RasterizedStream\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]),", "]) D3D11_PRIMITIVE_TOPOLOGY = Enum(\"D3D11_PRIMITIVE_TOPOLOGY\", [ \"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED\", \"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\",", "\"Texture1D\"), (D3D11_TEX1D_ARRAY_SRV, \"Texture1DArray\"), (D3D11_TEX2D_SRV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_SRV, \"Texture2DArray\"), (D3D11_TEX2DMS_SRV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_SRV,", "[(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]), StdMethod(HRESULT,", "\"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX1D_ARRAY_SRV = 
Struct(\"D3D11_TEX1D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"),", "Out(Pointer(UINT), \"pSampleMask\")]), StdMethod(Void, \"OMGetDepthStencilState\", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\"), Out(Pointer(UINT), \"pStencilRef\")]), StdMethod(Void, \"SOGetTargets\",", "\"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST\",", "\"VSGetShader\", [Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetConstantBuffers\",", "merge, publish, distribute, sublicense, and/or sell # copies of the", "\"SrcBlend\"), (D3D11_BLEND, \"DestBlend\"), (D3D11_BLEND_OP, \"BlendOp\"), (D3D11_BLEND, \"SrcBlendAlpha\"), (D3D11_BLEND, \"DestBlendAlpha\"), (D3D11_BLEND_OP,", "(UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT, \"Format\"),", "= Interface(\"ID3D11View\", ID3D11DeviceChild) ID3D11ShaderResourceView = Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView = Interface(\"ID3D11RenderTargetView\",", "DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True), ]) d3d11.addInterfaces([ IDXGIAdapter1,", "\"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"Dispatch\", [(UINT, \"ThreadGroupCountX\"), (UINT,", "\"D3DERR_INVALIDCALL\", \"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild) ID3D11BlendState = Interface(\"ID3D11BlendState\",", "= Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT,", "\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreateGeometryShaderWithStreamOutput\", [(Blob(Const(Void),", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_DSV_FLAG = Flags(UINT, [ \"D3D11_DSV_READ_ONLY_DEPTH\", \"D3D11_DSV_READ_ONLY_STENCIL\",", "Interface(\"ID3D11RenderTargetView\", ID3D11View) ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View)", "[ \"D3D11_BLEND_ZERO\", \"D3D11_BLEND_ONE\", \"D3D11_BLEND_SRC_COLOR\", \"D3D11_BLEND_INV_SRC_COLOR\", \"D3D11_BLEND_SRC_ALPHA\", \"D3D11_BLEND_INV_SRC_ALPHA\", \"D3D11_BLEND_DEST_ALPHA\", \"D3D11_BLEND_INV_DEST_ALPHA\", \"D3D11_BLEND_DEST_COLOR\",", "[ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "StdMethod(HRESULT, \"CreateGeometryShaderWithStreamOutput\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"),", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_UAV = Struct(\"D3D11_TEX2D_UAV\", [ (UINT, \"MipSlice\"),", "]) D3D11_CLASS_INSTANCE_DESC = Struct(\"D3D11_CLASS_INSTANCE_DESC\", [ (UINT, \"InstanceId\"), (UINT, \"InstanceIndex\"), (UINT,", "[Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"DSGetSamplers\", [(UINT,", "\"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"), (D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None), ])", "Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT,", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "ID3D11Resource) ID3D11Texture1D = Interface(\"ID3D11Texture1D\", ID3D11Resource) ID3D11Texture2D = Interface(\"ID3D11Texture2D\", ID3D11Resource) ID3D11Texture3D", "\"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"IASetIndexBuffer\", [(ObjPointer(ID3D11Buffer), \"pIndexBuffer\"), (DXGI_FORMAT, \"Format\"), (UINT,", "Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"CSGetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "(Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT,", "\"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter),", "\"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_SRV_DIMENSION, \"ViewDimension\"),", "\"ppSamplers\")]), StdMethod(Void, \"GetPredication\", [Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\"), Out(Pointer(BOOL), \"pPredicateValue\")]), StdMethod(Void, \"GSGetShaderResources\", [(UINT,", "MAKE_HRESULT([ \"D3D11_ERROR_FILE_NOT_FOUND\", \"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS\", \"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS\", \"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD\", \"D3DERR_INVALIDCALL\", \"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState =", "[]), StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG =", "\"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\",", "ID3D11Buffer.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BUFFER_DESC), \"pDesc\")]), ] D3D11_TEXTURE1D_DESC =", "ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList = Interface(\"ID3D11CommandList\", ID3D11DeviceChild) ID3D11Device =", "(UINT, \"ElementWidth\")]), None), ]) D3D11_TEX1D_RTV = Struct(\"D3D11_TEX1D_RTV\", [ (UINT, \"MipSlice\"),", "\"Texture1D\"), (D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV, \"Texture2DArray\"), (D3D11_TEX3D_UAV, \"Texture3D\"), ]),", "\"CreateClassInstance\", [(LPCSTR, \"pClassTypeName\"), (UINT, \"ConstantBufferOffset\"), (UINT, \"ConstantVectorOffset\"), (UINT, \"TextureOffset\"), (UINT,", "\"pInputLayout\")]), StdMethod(Void, \"IASetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppVertexBuffers\"),", "] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY = Enum(\"D3D11_QUERY\",", "\"pSampleMask\")]), StdMethod(Void, \"OMGetDepthStencilState\", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\"), Out(Pointer(UINT), \"pStencilRef\")]), StdMethod(Void, \"SOGetTargets\", [(UINT,", "D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [ \"D3D11_BUFFEREX_SRV_FLAG_RAW\", ]) D3D11_BUFFEREX_SRV = Struct(\"D3D11_BUFFEREX_SRV\", [", "ID3D11RenderTargetView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), \"pDesc\")]), ] D3D11_TEX1D_DSV =", "[(Pointer(Const(D3D11_BUFFER_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Buffer)), \"ppBuffer\")]), StdMethod(HRESULT, \"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), \"pDesc\"),", "is set StdFunction(HRESULT, \"D3D11CoreRegisterLayers\", [LPCVOID, DWORD], internal=True), StdFunction(SIZE_T, \"D3D11CoreGetLayeredDeviceSize\", [LPCVOID,", "D3D11_TEXTURECUBE_FACE = Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\",", "Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV = Struct(\"D3D11_TEX2D_ARRAY_RTV\", [ (UINT,", "EnumPolymorphic(\"D3D11_FEATURE\", \"Feature\", [ (\"D3D11_FEATURE_THREADING\", Pointer(D3D11_FEATURE_DATA_THREADING)), (\"D3D11_FEATURE_DOUBLES\", Pointer(D3D11_FEATURE_DATA_DOUBLES)), (\"D3D11_FEATURE_FORMAT_SUPPORT\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)), (\"D3D11_FEATURE_FORMAT_SUPPORT2\",", "\"ClearState\", []), StdMethod(Void, \"Flush\", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE, \"GetType\", []), StdMethod(UINT, \"GetContextFlags\",", "(D3D11_TEX1D_ARRAY_SRV, \"Texture1DArray\"), (D3D11_TEX2D_SRV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_SRV, \"Texture2DArray\"), (D3D11_TEX2DMS_SRV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_SRV, \"Texture2DMSArray\"),", "ID3D11DeviceChild) ID3D11InputLayout = Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous", "Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous = 
Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild)", "\"BlendOp\"), (D3D11_BLEND, \"SrcBlendAlpha\"), (D3D11_BLEND, \"DestBlendAlpha\"), (D3D11_BLEND_OP, \"BlendOpAlpha\"), (UINT8, \"RenderTargetWriteMask\"), ])", "Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild)", "\"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\",", "] D3D11_FORMAT_SUPPORT = Flags(UINT, [ \"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_SO_BUFFER\", \"D3D11_FORMAT_SUPPORT_TEXTURE1D\",", "\"StartComponent\"), (BYTE, \"ComponentCount\"), (BYTE, \"OutputSlot\"), ]) D3D11_VIEWPORT = Struct(\"D3D11_VIEWPORT\", [", "\"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_ANISOTROPIC\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR\",", "= Enum(\"D3D11_FILTER_TYPE\", [ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ]) D3D11_TEXTURE_ADDRESS_MODE = Enum(\"D3D11_TEXTURE_ADDRESS_MODE\", [", "\"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSSetShader\", [(ObjPointer(ID3D11GeometryShader), \"pShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT,", "\"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ]) D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\", [ \"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ]) D3D11_TEXTURE_ADDRESS_MODE", "StdMethod(Void, \"DSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "\"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"Begin\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(Void, \"End\",", "[ StdMethod(Void, \"GetType\", [Out(Pointer(D3D11_RESOURCE_DIMENSION), \"pResourceDimension\")]), StdMethod(Void, \"SetEvictionPriority\", [(UINT, \"EvictionPriority\")]), StdMethod(UINT,", "Interface(\"ID3D11ClassInstance\", ID3D11DeviceChild) ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList = Interface(\"ID3D11CommandList\", ID3D11DeviceChild)", "\"D3D11_COLOR_WRITE_ENABLE_ALL\", \"D3D11_COLOR_WRITE_ENABLE_RED\", \"D3D11_COLOR_WRITE_ENABLE_GREEN\", \"D3D11_COLOR_WRITE_ENABLE_BLUE\", \"D3D11_COLOR_WRITE_ENABLE_ALPHA\", ]) D3D11_RENDER_TARGET_BLEND_DESC = Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\", [", "\"ByteWidth\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), (UINT,", "(Union(None, [ 
(D3D11_BUFFER_UAV, \"Buffer\"), (D3D11_TEX1D_UAV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV, \"Texture2D\"),", "\"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\", \"D3D11_QUERY_PIPELINE_STATISTICS\", \"D3D11_QUERY_OCCLUSION_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS_STREAM0\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0\",", "D3D11_CLASS_INSTANCE_DESC = Struct(\"D3D11_CLASS_INSTANCE_DESC\", [ (UINT, \"InstanceId\"), (UINT, \"InstanceIndex\"), (UINT, \"TypeId\"),", "Struct(\"D3D11_CLASS_INSTANCE_DESC\", [ (UINT, \"InstanceId\"), (UINT, \"InstanceIndex\"), (UINT, \"TypeId\"), (UINT, \"ConstantBuffer\"),", "\"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"),", "\"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER\", \"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST\", \"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\", ])", "\"OMGetBlendState\", [Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\"), Out(Array(FLOAT, 4), \"BlendFactor\"), Out(Pointer(UINT), \"pSampleMask\")]), StdMethod(Void, \"OMGetDepthStencilState\",", "\"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"OMGetBlendState\", [Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\"), Out(Array(FLOAT, 4),", "\"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\", [(UINT, \"StartSlot\"),", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [ (UINT,", "(Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void, \"OMSetBlendState\", [(ObjPointer(ID3D11BlendState), \"pBlendState\"), (Array(Const(FLOAT),", "\"D3D11_COLOR_WRITE_ENABLE_BLUE\", \"D3D11_COLOR_WRITE_ENABLE_ALPHA\", ]) D3D11_RENDER_TARGET_BLEND_DESC = Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\", [ (BOOL, \"BlendEnable\"), (D3D11_BLEND,", "(D3D11_BLEND, \"SrcBlend\"), (D3D11_BLEND, \"DestBlend\"), (D3D11_BLEND_OP, \"BlendOp\"), (D3D11_BLEND, \"SrcBlendAlpha\"), (D3D11_BLEND, \"DestBlendAlpha\"),", "[ (OpaquePointer(Void), \"pData\"), (UINT, \"RowPitch\"), (UINT, \"DepthPitch\"), ]) ID3D11Resource.methods +=", "(Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)),", "\"D3D11_BUFFER_UAV_FLAG_COUNTER\", ]) D3D11_BUFFER_UAV = Struct(\"D3D11_BUFFER_UAV\", [ (UINT, \"FirstElement\"), (UINT, \"NumElements\"),", "ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous = Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query", "StdMethod(Void, \"DrawIndexed\", [(UINT, \"IndexCount\"), (UINT, \"StartIndexLocation\"), (INT, 
\"BaseVertexLocation\")]), StdMethod(Void, \"Draw\",", "\"D3D11_COMPARISON_GREATER\", \"D3D11_COMPARISON_NOT_EQUAL\", \"D3D11_COMPARISON_GREATER_EQUAL\", \"D3D11_COMPARISON_ALWAYS\", ]) D3D11_DEPTH_WRITE_MASK = Enum(\"D3D11_DEPTH_WRITE_MASK\", [ \"D3D11_DEPTH_WRITE_MASK_ZERO\",", "D3D11_PRIMITIVE_TOPOLOGY = Enum(\"D3D11_PRIMITIVE_TOPOLOGY\", [ \"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED\", \"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\",", "(INT, \"DepthBias\"), (FLOAT, \"DepthBiasClamp\"), (FLOAT, \"SlopeScaledDepthBias\"), (BOOL, \"DepthClipEnable\"), (BOOL, \"ScissorEnable\"),", "(ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreatePixelShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T,", "\"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_ANISOTROPIC\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT\",", "\"StartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\"),", "Struct(\"D3D11_FEATURE_DATA_THREADING\", [ (BOOL, \"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\",", "\"DataSize\"), (D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]), StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"), (BOOL, \"PredicateValue\")]), StdMethod(Void,", "= Struct(\"D3D11_QUERY_DESC\", [ (D3D11_QUERY, \"Query\"), (D3D11_QUERY_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Query.methods +=", "] D3D11_TEXTURECUBE_FACE = Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\",", "\"pDomainShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DSSetSamplers\", [(UINT, \"StartSlot\"),", "(Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DSSetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "\"StencilReadMask\"), (UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"), (D3D11_DEPTH_STENCILOP_DESC, \"BackFace\"), ]) ID3D11DepthStencilState.methods +=", "\"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT,", "(D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV, \"Texture2DArray\"), (D3D11_TEX3D_UAV, \"Texture3D\"), ]), None),", "\"MiscFlags\"), ]) D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER, 
\"LastDeviceDependentCounter\"), (UINT, \"NumSimultaneousCounters\"),", "ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild) ID3D11Buffer =", "\"CreateVertexShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\")]),", "internal=True), StdFunction(HRESULT, \"D3D11CoreCreateLayeredDevice\", [LPCVOID, DWORD, LPCVOID, (REFIID, \"riid\"), Out(Pointer(ObjPointer(Void)), \"ppvObj\")],", "\"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\", ]) D3D11_PRIMITIVE = Enum(\"D3D11_PRIMITIVE\", [ \"D3D11_PRIMITIVE_UNDEFINED\", \"D3D11_PRIMITIVE_POINT\", \"D3D11_PRIMITIVE_LINE\",", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2DMS_ARRAY_RTV = Struct(\"D3D11_TEX2DMS_ARRAY_RTV\", [ (UINT,", "[(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"), (D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"),", "Out(Pointer(UINT), \"pDataSize\"), Out(OpaquePointer(Void), \"pData\")]), StdMethod(HRESULT, \"SetPrivateData\", [(REFGUID, \"guid\"), (UINT, \"DataSize\"),", "persons to whom the Software is # furnished to do", "(UINT, \"NumElements\"), (D3D11_BUFFEREX_SRV_FLAG, \"Flags\"), ]) D3D11_TEX1D_SRV = Struct(\"D3D11_TEX1D_SRV\", [ (UINT,", "StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"), (BOOL, \"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\", [(UINT, \"StartSlot\"),", "(UINT, \"ArraySize\"), ]) D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"), ])", "\"pDstResource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\")]), StdMethod(Void, \"UpdateSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (Pointer(Const(D3D11_BOX)),", "\"*pNumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSGetScissorRects\", [Out(Pointer(UINT), \"pNumRects\"), Out(Array(D3D11_RECT, \"*pNumRects\"), \"pRects\")]), StdMethod(Void,", "import * from d3dcommon import * from d3d11sdklayers import *", "Interface(\"ID3D11ComputeShader\", ID3D11DeviceChild) ID3D11InputLayout = Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild)", "OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE.", "[ \"D3D11_FILTER_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_ANISOTROPIC\",", "\"pData\")]), ] D3D11_COMPARISON_FUNC = Enum(\"D3D11_COMPARISON_FUNC\", [ \"D3D11_COMPARISON_NEVER\", \"D3D11_COMPARISON_LESS\", \"D3D11_COMPARISON_EQUAL\", \"D3D11_COMPARISON_LESS_EQUAL\",", "D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV = Struct(\"D3D11_TEX2D_ARRAY_RTV\",", "import * from d3d11sdklayers import * HRESULT = MAKE_HRESULT([ \"D3D11_ERROR_FILE_NOT_FOUND\",", "\"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\",", "\"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_SRV =", "\"pPredicate\"), (BOOL, \"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)),", "]) D3D11_TEX2D_ARRAY_UAV = Struct(\"D3D11_TEX2D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT,", "\"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"), (D3D11_INPUT_CLASSIFICATION, \"InputSlotClass\"), (UINT, \"InstanceDataStepRate\"), ]) D3D11_FILL_MODE =", "[]), ] D3D11_FEATURE_DATA_THREADING = Struct(\"D3D11_FEATURE_DATA_THREADING\", [ (BOOL, \"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"),", "]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\", \"D3D11_RESOURCE_DIMENSION_TEXTURE1D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE2D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE3D\",", "Enum(\"D3D11_QUERY\", [ \"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\", \"D3D11_QUERY_PIPELINE_STATISTICS\", \"D3D11_QUERY_OCCLUSION_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE\",", "\"ViewDimension\"), (D3D11_DSV_FLAG, \"Flags\"), (Union(None, [ (D3D11_TEX1D_DSV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV,", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RASTERIZER_DESC), \"pDesc\")]), ] D3D11_SUBRESOURCE_DATA = Struct(\"D3D11_SUBRESOURCE_DATA\", [", "\"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\", ]) D3D11_FORMAT_SUPPORT2 = Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE\",", "(UINT, \"BaseSampler\"), (BOOL, \"Created\"), ]) ID3D11ClassInstance.methods += [ StdMethod(Void, \"GetClassLinkage\",", "ID3D11Resource.methods += [ StdMethod(Void, \"GetType\", [Out(Pointer(D3D11_RESOURCE_DIMENSION), \"pResourceDimension\")]), StdMethod(Void, \"SetEvictionPriority\", [(UINT,", "\"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\", ]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [ \"D3D11_APPEND_ALIGNED_ELEMENT\", ]) D3D11_INPUT_ELEMENT_DESC", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "(SIZE_T, \"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"), (UINT, \"NumEntries\"), (Array(Const(UINT), \"NumStrides\"), \"pBufferStrides\"),", "\"D3D11_BLEND_SRC_ALPHA_SAT\", \"D3D11_BLEND_BLEND_FACTOR\", \"D3D11_BLEND_INV_BLEND_FACTOR\", \"D3D11_BLEND_SRC1_COLOR\", \"D3D11_BLEND_INV_SRC1_COLOR\", \"D3D11_BLEND_SRC1_ALPHA\", \"D3D11_BLEND_INV_SRC1_ALPHA\", ]) D3D11_BLEND_OP =", "(UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\",", "\"FirstWSlice\"), 
(UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"),", "\"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH\", ]) D3D11_CULL_MODE =", "+= [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\",", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSSetShaderResources\", [(UINT, \"StartSlot\"),", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"VSGetSamplers\", [(UINT, \"StartSlot\"),", "]) D3D11_RECT = Alias(\"D3D11_RECT\", RECT) D3D11_BOX = Struct(\"D3D11_BOX\", [ (UINT,", "(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"),", "StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG = Flags(UINT,", "\"BytecodeLength\"), Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(HRESULT, \"CreateVertexShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"),", "\"ppImmediateContext\")]), # XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG", "\"pDesc\")]), ] D3D11_TEXTURE2D_DESC = Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"),", "= Enum(\"D3D11_COMPARISON_FUNC\", [ \"D3D11_COMPARISON_NEVER\", \"D3D11_COMPARISON_LESS\", \"D3D11_COMPARISON_EQUAL\", \"D3D11_COMPARISON_LESS_EQUAL\", \"D3D11_COMPARISON_GREATER\", \"D3D11_COMPARISON_NOT_EQUAL\", \"D3D11_COMPARISON_GREATER_EQUAL\",", "\"pTypeName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]), ] ID3D11ClassLinkage.methods += [ StdMethod(HRESULT, \"GetClassInstance\", [(LPCSTR,", "\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\")]), StdMethod(HRESULT, \"CreateComputeShader\", [(Blob(Const(Void),", "[(DXGI_FORMAT, \"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]),", "(D3D11_BUFFER_RTV, \"Buffer\"), (D3D11_TEX1D_RTV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_RTV, \"Texture1DArray\"), (D3D11_TEX2D_RTV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_RTV, \"Texture2DArray\"),", "= Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild) ID3D11BlendState = Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\",", "D3D11_BUFFER_DESC = Struct(\"D3D11_BUFFER_DESC\", [ (UINT, \"ByteWidth\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"),", "(D3D11_FORMAT_SUPPORT2, \"OutFormatSupport2\"), ]) D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct(\"D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS\", [ 
(BOOL, \"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"), ])", "(Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"HSSetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "\"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST\",", "= Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\",", "Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)), ], Blob(Void, \"FeatureSupportDataSize\"), False) ID3D11DeviceContext.methods += [ StdMethod(Void, \"VSSetConstantBuffers\",", "\"pFormatSupport\")]), StdMethod(HRESULT, \"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, \"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void,", "Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\")]), StdMethod(HRESULT, \"CreateHullShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage),", "\"GSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSGetSamplers\",", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "substantial portions of the Software. 
# # THE SOFTWARE IS", "ID3D11Texture1D.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE1D_DESC), \"pDesc\")]), ] D3D11_TEXTURE2D_DESC =", "(D3D11_TEX1D_UAV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV, \"Texture2DArray\"), (D3D11_TEX3D_UAV, \"Texture3D\"),", "\"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"), (UINT, \"UAVStartSlot\"),", "\"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\")]), StdMethod(HRESULT, \"CreateComputeShader\",", "]) D3D11_SRV_DIMENSION = Enum(\"D3D11_SRV_DIMENSION\", [ \"D3D11_SRV_DIMENSION_UNKNOWN\", \"D3D11_SRV_DIMENSION_BUFFER\", \"D3D11_SRV_DIMENSION_TEXTURE1D\", \"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2D\",", "StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), ] D3D11_COMPARISON_FUNC = Enum(\"D3D11_COMPARISON_FUNC\",", "\"DrawAuto\", []), StdMethod(Void, \"DrawIndexedInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\",", "(D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), (UINT, \"StructureByteStride\"), ]) ID3D11Buffer.methods += [", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"),", "\"GSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\",", "= Struct(\"D3D11_TEXCUBE_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_ARRAY_SRV =", "StdMethod(Void, \"HSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "]) ID3D11Device.methods += [ StdMethod(HRESULT, \"CreateBuffer\", [(Pointer(Const(D3D11_BUFFER_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"),", "]) D3D11_CLEAR_FLAG = Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ]) D3D11_RECT =", "= Struct(\"D3D11_TEX2D_UAV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_UAV = Struct(\"D3D11_TEX2D_ARRAY_UAV\", [", "\"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\", [ \"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\", \"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2D\",", "[(ObjPointer(ID3D11Buffer), \"pDstBuffer\"), (UINT, \"DstAlignedByteOffset\"), (ObjPointer(ID3D11UnorderedAccessView), \"pSrcView\")]), StdMethod(Void, \"ClearRenderTargetView\", [(ObjPointer(ID3D11RenderTargetView), \"pRenderTargetView\"),", "\"D3D11_CREATE_DEVICE_SINGLETHREADED\", \"D3D11_CREATE_DEVICE_DEBUG\", \"D3D11_CREATE_DEVICE_SWITCH_TO_REF\", \"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\", \"D3D11_CREATE_DEVICE_BGRA_SUPPORT\", ]) ID3D11Device.methods += [ StdMethod(HRESULT,", "(D3D11_USAGE, \"Usage\"), 
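# The conventions above are uniform: COM interfaces are introduced with
# Interface(name, base) and plain C enumerations are wrapped with Enum.
# As a minimal illustrative sketch of the same convention (this particular
# declaration is NOT part of the original spec file, though the enumeration
# itself is a real one from d3d11.h):
D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
    "D3D11_TEXTURECUBE_FACE_POSITIVE_X",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])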
D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
    "D3D11_INPUT_PER_VERTEX_DATA", "D3D11_INPUT_PER_INSTANCE_DATA",
])

D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [
    "D3D11_APPEND_ALIGNED_ELEMENT",
])

D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
    (LPCSTR, "SemanticName"), (UINT, "SemanticIndex"), (DXGI_FORMAT, "Format"), (UINT, "InputSlot"),
    (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"),
    (D3D11_INPUT_CLASSIFICATION, "InputSlotClass"), (UINT, "InstanceDataStepRate"),
])

D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", [
    "D3D11_FILL_WIREFRAME", "D3D11_FILL_SOLID",
])

# The 1..32 control-point patchlist values are generated rather than listed
# out one by one; the expansion is identical to the original enumeration.
D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [
    "D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED",
    "D3D11_PRIMITIVE_TOPOLOGY_POINTLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_LINELIST",
    "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
    "D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
] + [
    "D3D11_PRIMITIVE_TOPOLOGY_%u_CONTROL_POINT_PATCHLIST" % n for n in range(1, 33)
])

D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [
    "D3D11_PRIMITIVE_UNDEFINED",
    "D3D11_PRIMITIVE_POINT",
    "D3D11_PRIMITIVE_LINE",
    "D3D11_PRIMITIVE_TRIANGLE",
    "D3D11_PRIMITIVE_LINE_ADJ",
    "D3D11_PRIMITIVE_TRIANGLE_ADJ",
] + [
    "D3D11_PRIMITIVE_%u_CONTROL_POINT_PATCH" % n for n in range(1, 33)
])

D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", [
    "D3D11_CULL_NONE", "D3D11_CULL_FRONT", "D3D11_CULL_BACK",
])

D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [
    (UINT, "Stream"), (LPCSTR, "SemanticName"), (UINT, "SemanticIndex"),
    (BYTE, "StartComponent"), (BYTE, "ComponentCount"), (BYTE, "OutputSlot"),
])

D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [
    (FLOAT, "TopLeftX"), (FLOAT, "TopLeftY"), (FLOAT, "Width"),
    (FLOAT, "Height"), (FLOAT, "MinDepth"), (FLOAT, "MaxDepth"),
])

D3D11_CLEAR_FLAG = Flags(UINT, [
    "D3D11_CLEAR_DEPTH", "D3D11_CLEAR_STENCIL",
])

D3D11_RECT = Alias("D3D11_RECT", RECT)

D3D11_BOX = Struct("D3D11_BOX", [
    (UINT, "left"), (UINT, "top"), (UINT, "front"),
    (UINT, "right"), (UINT, "bottom"), (UINT, "back"),
])

D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [
    "D3D11_COMPARISON_NEVER", "D3D11_COMPARISON_LESS", "D3D11_COMPARISON_EQUAL",
    "D3D11_COMPARISON_LESS_EQUAL", "D3D11_COMPARISON_GREATER", "D3D11_COMPARISON_NOT_EQUAL",
    "D3D11_COMPARISON_GREATER_EQUAL", "D3D11_COMPARISON_ALWAYS",
])

D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", [
    "D3D11_DEPTH_WRITE_MASK_ZERO", "D3D11_DEPTH_WRITE_MASK_ALL",
])

D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [
    "D3D11_STENCIL_OP_KEEP", "D3D11_STENCIL_OP_ZERO", "D3D11_STENCIL_OP_REPLACE",
    "D3D11_STENCIL_OP_INCR_SAT", "D3D11_STENCIL_OP_DECR_SAT", "D3D11_STENCIL_OP_INVERT",
    "D3D11_STENCIL_OP_INCR", "D3D11_STENCIL_OP_DECR",
])

D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [
    (D3D11_STENCIL_OP, "StencilFailOp"), (D3D11_STENCIL_OP, "StencilDepthFailOp"),
    (D3D11_STENCIL_OP, "StencilPassOp"), (D3D11_COMPARISON_FUNC, "StencilFunc"),
])

D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [
    (BOOL, "DepthEnable"), (D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"), (D3D11_COMPARISON_FUNC, "DepthFunc"),
    (BOOL, "StencilEnable"), (UINT8, "StencilReadMask"), (UINT8, "StencilWriteMask"),
    (D3D11_DEPTH_STENCILOP_DESC, "FrontFace"), (D3D11_DEPTH_STENCILOP_DESC, "BackFace"),
])

ID3D11DepthStencilState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")]),
]

D3D11_BLEND = Enum("D3D11_BLEND", [
    "D3D11_BLEND_ZERO", "D3D11_BLEND_ONE",
    "D3D11_BLEND_SRC_COLOR", "D3D11_BLEND_INV_SRC_COLOR",
    "D3D11_BLEND_SRC_ALPHA", "D3D11_BLEND_INV_SRC_ALPHA",
    "D3D11_BLEND_DEST_ALPHA", "D3D11_BLEND_INV_DEST_ALPHA",
    "D3D11_BLEND_DEST_COLOR", "D3D11_BLEND_INV_DEST_COLOR",
    "D3D11_BLEND_SRC_ALPHA_SAT",
    "D3D11_BLEND_BLEND_FACTOR", "D3D11_BLEND_INV_BLEND_FACTOR",
    "D3D11_BLEND_SRC1_COLOR", "D3D11_BLEND_INV_SRC1_COLOR",
    "D3D11_BLEND_SRC1_ALPHA", "D3D11_BLEND_INV_SRC1_ALPHA",
])

D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [
    "D3D11_BLEND_OP_ADD", "D3D11_BLEND_OP_SUBTRACT", "D3D11_BLEND_OP_REV_SUBTRACT",
    "D3D11_BLEND_OP_MIN", "D3D11_BLEND_OP_MAX",
])

D3D11_COLOR_WRITE_ENABLE = Enum("D3D11_COLOR_WRITE_ENABLE", [
    "D3D11_COLOR_WRITE_ENABLE_ALL", "D3D11_COLOR_WRITE_ENABLE_RED",
    "D3D11_COLOR_WRITE_ENABLE_GREEN", "D3D11_COLOR_WRITE_ENABLE_BLUE",
    "D3D11_COLOR_WRITE_ENABLE_ALPHA",
])

D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [
    (BOOL, "BlendEnable"),
    (D3D11_BLEND, "SrcBlend"), (D3D11_BLEND, "DestBlend"), (D3D11_BLEND_OP, "BlendOp"),
    (D3D11_BLEND, "SrcBlendAlpha"), (D3D11_BLEND, "DestBlendAlpha"), (D3D11_BLEND_OP, "BlendOpAlpha"),
    (UINT8, "RenderTargetWriteMask"),
])

D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [
    (BOOL, "AlphaToCoverageEnable"),
    (BOOL, "IndependentBlendEnable"),
    (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"),
])

ID3D11BlendState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")]),
]

D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [
    (D3D11_FILL_MODE, "FillMode"), (D3D11_CULL_MODE, "CullMode"), (BOOL, "FrontCounterClockwise"),
    (INT, "DepthBias"), (FLOAT, "DepthBiasClamp"), (FLOAT, "SlopeScaledDepthBias"),
    (BOOL, "DepthClipEnable"), (BOOL, "ScissorEnable"),
    (BOOL, "MultisampleEnable"), (BOOL, "AntialiasedLineEnable"),
])

ID3D11RasterizerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")]),
]

D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
    (OpaquePointer(Const(Void)), "pSysMem"),
    (UINT, "SysMemPitch"),
    (UINT, "SysMemSlicePitch"),
])

D3D11_USAGE = Enum("D3D11_USAGE", [
    "D3D11_USAGE_DEFAULT", "D3D11_USAGE_IMMUTABLE",
    "D3D11_USAGE_DYNAMIC", "D3D11_USAGE_STAGING",
])

D3D11_BIND_FLAG = Flags(UINT, [
    "D3D11_BIND_VERTEX_BUFFER", "D3D11_BIND_INDEX_BUFFER", "D3D11_BIND_CONSTANT_BUFFER",
    "D3D11_BIND_SHADER_RESOURCE", "D3D11_BIND_STREAM_OUTPUT", "D3D11_BIND_RENDER_TARGET",
    "D3D11_BIND_DEPTH_STENCIL", "D3D11_BIND_UNORDERED_ACCESS",
])

D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
    "D3D11_CPU_ACCESS_WRITE", "D3D11_CPU_ACCESS_READ",
])

D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
    "D3D11_RESOURCE_MISC_GENERATE_MIPS", "D3D11_RESOURCE_MISC_SHARED", "D3D11_RESOURCE_MISC_TEXTURECUBE",
    "D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS", "D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
    "D3D11_RESOURCE_MISC_BUFFER_STRUCTURED", "D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
    "D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX", "D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])

D3D11_MAP = Enum("D3D11_MAP", [
    "D3D11_MAP_READ", "D3D11_MAP_WRITE", "D3D11_MAP_READ_WRITE",
    "D3D11_MAP_WRITE_DISCARD", "D3D11_MAP_WRITE_NO_OVERWRITE",
])

D3D11_MAP_FLAG = Flags(UINT, [
    "D3D11_MAP_FLAG_DO_NOT_WAIT",
])

D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
    (OpaquePointer(Void), "pData"), (UINT, "RowPitch"), (UINT, "DepthPitch"),
])

D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
    "D3D11_RESOURCE_DIMENSION_UNKNOWN", "D3D11_RESOURCE_DIMENSION_BUFFER",
    "D3D11_RESOURCE_DIMENSION_TEXTURE1D", "D3D11_RESOURCE_DIMENSION_TEXTURE2D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])

ID3D11Resource.methods += [
    StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]),
    StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
    StdMethod(UINT, "GetEvictionPriority", []),
]

D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
    (UINT, "ByteWidth"), (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
    (UINT, "StructureByteStride"),
])

ID3D11Buffer.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]),
]

D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
    (UINT, "Width"), (UINT, "MipLevels"), (UINT, "ArraySize"), (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture1D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]),
]

D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
    (UINT, "Width"), (UINT, "Height"), (UINT, "MipLevels"), (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"), (DXGI_SAMPLE_DESC, "SampleDesc"),
    (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture2D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]

D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
    (UINT, "Width"), (UINT, "Height"), (UINT, "Depth"), (UINT, "MipLevels"),
    (DXGI_FORMAT, "Format"), (D3D11_USAGE, "Usage"), (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"), (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture3D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]),
]
\"RasterizedStream\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreatePixelShader\", [(Blob(Const(Void),", "\"D3D11_TEXTURE_ADDRESS_CLAMP\", \"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ]) D3D11_SAMPLER_DESC = Struct(\"D3D11_SAMPLER_DESC\", [ (D3D11_FILTER, \"Filter\"),", "(UINT, \"MiscFlags\"), ]) D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER, \"LastDeviceDependentCounter\"), (UINT,", "]) ID3D11Counter.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", "(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"VSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT, \"CheckCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pDesc\"), Out(Pointer(D3D11_COUNTER_TYPE),", "\"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"IAGetPrimitiveTopology\", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), \"pTopology\")]),", "StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void,", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSGetShader\", [Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\"),", "\"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [", "\"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSSetShader\", [(ObjPointer(ID3D11HullShader), \"pHullShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"),", "d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"), (D3D11_CREATE_DEVICE_FLAG,", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BUFFER_DESC), \"pDesc\")]), ] D3D11_TEXTURE1D_DESC = Struct(\"D3D11_TEXTURE1D_DESC\", [", "(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSGetShader\", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\"), Out(Array(ObjPointer(ID3D11ClassInstance),", "\"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE", "\"pData\")]), StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), ] D3D11_COMPARISON_FUNC =", "]) D3D11_DEVICE_CONTEXT_TYPE = Enum(\"D3D11_DEVICE_CONTEXT_TYPE\", [ \"D3D11_DEVICE_CONTEXT_IMMEDIATE\", \"D3D11_DEVICE_CONTEXT_DEFERRED\", ]) D3D11_CLASS_INSTANCE_DESC =", "StdMethod(Void, \"ClearUnorderedAccessViewFloat\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(FLOAT), 4), \"Values\")]), StdMethod(Void, \"ClearDepthStencilView\", [(ObjPointer(ID3D11DepthStencilView),", "]) D3D11_SO_DECLARATION_ENTRY = Struct(\"D3D11_SO_DECLARATION_ENTRY\", [ (UINT, \"Stream\"), (LPCSTR, \"SemanticName\"), (UINT,", "\"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\", \"D3D11_BLEND_OP_MAX\", ]) D3D11_COLOR_WRITE_ENABLE = Enum(\"D3D11_COLOR_WRITE_ENABLE\", [ \"D3D11_COLOR_WRITE_ENABLE_ALL\", \"D3D11_COLOR_WRITE_ENABLE_RED\",", "StdMethod(Void, \"Begin\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(Void, \"End\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(HRESULT, \"GetData\",", "Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL, \"DoublePrecisionFloatShaderOps\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [ (DXGI_FORMAT,", "(UINT, \"NumClassInstances\")]), StdMethod(Void, \"PSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"),", "\"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS\", \"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS\", \"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD\", \"D3DERR_INVALIDCALL\", \"D3DERR_WASSTILLDRAWING\", ]) ID3D11DepthStencilState = Interface(\"ID3D11DepthStencilState\", ID3D11DeviceChild)", "(D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture2D.methods += [", "\"pNumClassInstances\")]), StdMethod(Void, \"PSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]),", "\"OMGetRenderTargets\", [(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumViews\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\",", "Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"HSGetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "\"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), ] D3D11_COMPARISON_FUNC = Enum(\"D3D11_COMPARISON_FUNC\", [", "(UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void,", "] D3D11_BUFFER_SRV = Struct(\"D3D11_BUFFER_SRV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]),", "\"DataSize\"), \"pData\"), (UINT, \"DataSize\"), (D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]), StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"),", "\"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "(D3D11_BLEND, \"DestBlend\"), (D3D11_BLEND_OP, 
\"BlendOp\"), (D3D11_BLEND, \"SrcBlendAlpha\"), (D3D11_BLEND, \"DestBlendAlpha\"), (D3D11_BLEND_OP, \"BlendOpAlpha\"),", "# in the Software without restriction, including without limitation the", "Flags(UINT, [ \"D3D11_BUFFEREX_SRV_FLAG_RAW\", ]) D3D11_BUFFEREX_SRV = Struct(\"D3D11_BUFFEREX_SRV\", [ (UINT, \"FirstElement\"),", "(D3D11_QUERY_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Query.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]),", "\"PSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSGetShader\",", "[]), StdMethod(Void, \"Flush\", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE, \"GetType\", []), StdMethod(UINT, \"GetContextFlags\", []),", "]) D3D11_COLOR_WRITE_ENABLE = Enum(\"D3D11_COLOR_WRITE_ENABLE\", [ \"D3D11_COLOR_WRITE_ENABLE_ALL\", \"D3D11_COLOR_WRITE_ENABLE_RED\", \"D3D11_COLOR_WRITE_ENABLE_GREEN\", \"D3D11_COLOR_WRITE_ENABLE_BLUE\", \"D3D11_COLOR_WRITE_ENABLE_ALPHA\",", "\"CSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"ClearState\",", "ID3D11HullShader = Interface(\"ID3D11HullShader\", ID3D11DeviceChild) ID3D11DomainShader = Interface(\"ID3D11DomainShader\", ID3D11DeviceChild) ID3D11GeometryShader =", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_DSV_FLAG = Flags(UINT, [ \"D3D11_DSV_READ_ONLY_DEPTH\",", "DWORD], internal=True), ]) d3d11.addInterfaces([ IDXGIAdapter1, IDXGIDevice1, IDXGIResource, ID3D11Debug, ID3D11InfoQueue, ID3D11SwitchToRef,", "Struct(\"D3D11_TEXCUBE_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_ARRAY_SRV = Struct(\"D3D11_TEXCUBE_ARRAY_SRV\",", "(UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"DSGetShaderResources\", [(UINT,", "(D3D11_STENCIL_OP, \"StencilDepthFailOp\"), (D3D11_STENCIL_OP, \"StencilPassOp\"), (D3D11_COMPARISON_FUNC, \"StencilFunc\"), ]) D3D11_DEPTH_STENCIL_DESC = Struct(\"D3D11_DEPTH_STENCIL_DESC\",", "\"Subresource\"), (D3D11_MAP, \"MapType\"), (D3D11_MAP_FLAG, \"MapFlags\"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), \"pMappedResource\")]), StdMethod(Void, \"Unmap\", [(ObjPointer(ID3D11Resource),", "D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"),", "] D3D11_BLEND = Enum(\"D3D11_BLEND\", [ \"D3D11_BLEND_ZERO\", \"D3D11_BLEND_ONE\", \"D3D11_BLEND_SRC_COLOR\", \"D3D11_BLEND_INV_SRC_COLOR\", \"D3D11_BLEND_SRC_ALPHA\",", "(UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct(\"D3D11_UNORDERED_ACCESS_VIEW_DESC\", [ (DXGI_FORMAT,", "\"GSPrimitives\"), (UINT64, \"CInvocations\"), (UINT64, \"CPrimitives\"), (UINT64, \"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64,", "(OpaquePointer(Const(IUnknown)), \"pData\")]), ] D3D11_COMPARISON_FUNC = Enum(\"D3D11_COMPARISON_FUNC\", [ \"D3D11_COMPARISON_NEVER\", \"D3D11_COMPARISON_LESS\", \"D3D11_COMPARISON_EQUAL\",", "]) ID3D11DepthStencilView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_UAV_FLAG", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), 
(Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSGetShader\", [Out(Pointer(ObjPointer(ID3D11VertexShader)),", "\"CheckFeatureSupport\", [(D3D11_FEATURE, \"Feature\"), Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT, \"FeatureSupportDataSize\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID,", "\"D3D11_TEXTURE_ADDRESS_WRAP\", \"D3D11_TEXTURE_ADDRESS_MIRROR\", \"D3D11_TEXTURE_ADDRESS_CLAMP\", \"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ]) D3D11_SAMPLER_DESC = Struct(\"D3D11_SAMPLER_DESC\", [", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMSetRenderTargets\", [(UINT,", "\"SetEvictionPriority\", [(UINT, \"EvictionPriority\")]), StdMethod(UINT, \"GetEvictionPriority\", []), ] D3D11_BUFFER_DESC = Struct(\"D3D11_BUFFER_DESC\",", "sell # copies of the Software, and to permit persons", "]) D3D11_RENDER_TARGET_BLEND_DESC = Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\", [ (BOOL, \"BlendEnable\"), (D3D11_BLEND, \"SrcBlend\"), (D3D11_BLEND,", "\"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSSetShader\", [(ObjPointer(ID3D11PixelShader), \"pPixelShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"),", "]) D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV =", "]) D3D11_TEX2D_UAV = Struct(\"D3D11_TEX2D_UAV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2D_ARRAY_UAV =", "[ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_DSV_FLAG = Flags(UINT, [", "(UINT, \"StartVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"GSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_SRV = Struct(\"D3D11_TEX2D_SRV\", [ (UINT,", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT,", "\"ppHullShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"HSGetSamplers\", [(UINT, \"StartSlot\"),", "= Struct(\"D3D11_TEX1D_ARRAY_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "+= [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE1D_DESC), \"pDesc\")]), ] D3D11_TEXTURE2D_DESC = Struct(\"D3D11_TEXTURE2D_DESC\",", "\"Buffer\"), (D3D11_TEX1D_RTV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_RTV, \"Texture1DArray\"), (D3D11_TEX2D_RTV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_RTV, \"Texture2DArray\"), (D3D11_TEX2DMS_RTV,", "(UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO), \"pCounterInfo\")]), StdMethod(HRESULT, \"CheckCounter\",", "\"pClassInstanceName\"), (UINT, \"InstanceIndex\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), StdMethod(HRESULT, \"CreateClassInstance\", [(LPCSTR, \"pClassTypeName\"), (UINT,", "[ StdMethod(Void, \"GetResource\", [Out(Pointer(ObjPointer(ID3D11Resource)), \"ppResource\")]), ] D3D11_BUFFER_SRV = Struct(\"D3D11_BUFFER_SRV\", [", "\"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, 
\"DSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"GSSetShader\", [(ObjPointer(ID3D11GeometryShader), \"pShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)),", "= Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"MipLevels\"), (UINT,", "= Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_SRV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_SRV,", "\"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ \"D3D11_CPU_ACCESS_WRITE\",", "D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\", [ \"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\", \"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2D\", \"D3D11_DSV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2DMS\",", "StdMethod(HRESULT, \"CreateGeometryShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)),", "(UINT, \"ThreadGroupCountY\"), (UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]),", "\"ppShaderResourceViews\")]), StdMethod(Void, \"VSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]),", "\"SemanticIndex\"), (DXGI_FORMAT, \"Format\"), (UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"), (D3D11_INPUT_CLASSIFICATION, \"InputSlotClass\"), (UINT,", "\"D3D11_DSV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2DMS\", \"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY\", ]) D3D11_RTV_DIMENSION = Enum(\"D3D11_RTV_DIMENSION\", [ \"D3D11_RTV_DIMENSION_UNKNOWN\", \"D3D11_RTV_DIMENSION_BUFFER\",", "\"Format\"), Out(Pointer(UINT), \"Offset\")]), StdMethod(Void, \"GSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer),", "D3D11_FILL_MODE = Enum(\"D3D11_FILL_MODE\", [ \"D3D11_FILL_WIREFRAME\", \"D3D11_FILL_SOLID\", ]) D3D11_PRIMITIVE_TOPOLOGY = Enum(\"D3D11_PRIMITIVE_TOPOLOGY\",", "\"ppLinkage\")]), StdMethod(HRESULT, \"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)),", "[ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC =", "D3D11_TEX2DMS_ARRAY_RTV = Struct(\"D3D11_TEX2DMS_ARRAY_RTV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_RTV", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE2D_DESC), \"pDesc\")]), ] D3D11_TEXTURE3D_DESC = Struct(\"D3D11_TEXTURE3D_DESC\", [", "[]), StdMethod(HRESULT, \"GetDeviceRemovedReason\", []), StdMethod(Void, \"GetImmediateContext\", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdMethod(HRESULT, \"SetExceptionMode\",", "OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION", "ID3D11SamplerState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SAMPLER_DESC), \"pDesc\")]), ] D3D11_FORMAT_SUPPORT =", "\"StartSlot\"), 
(UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"Begin\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]),", "\"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSGetShader\", [Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\"),", "\"Flags\"), (Union(None, [ (D3D11_TEX1D_DSV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV,", "Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\")]), StdMethod(HRESULT, \"CreateGeometryShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage),", "= Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC =", "\"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX\", \"D3D11_RESOURCE_MISC_GDI_COMPATIBLE\", ]) D3D11_MAP = Enum(\"D3D11_MAP\", [ \"D3D11_MAP_READ\", \"D3D11_MAP_WRITE\", \"D3D11_MAP_READ_WRITE\",", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "+= [ StdMethod(UINT, \"GetDataSize\", []), ] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [", "[ \"D3D11_UAV_DIMENSION_UNKNOWN\", \"D3D11_UAV_DIMENSION_BUFFER\", \"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE3D\", ]) D3D11_USAGE", "\"UpdateSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (Pointer(Const(D3D11_BOX)), \"pDstBox\"), (OpaquePointer(Const(Void)), \"pSrcData\"), (UINT,", "Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_SRV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_SRV, \"Buffer\"),", "(UINT, \"InstanceDataStepRate\"), ]) D3D11_FILL_MODE = Enum(\"D3D11_FILL_MODE\", [ \"D3D11_FILL_WIREFRAME\", \"D3D11_FILL_SOLID\", ])", "(UINT, \"NumStrides\"), (UINT, \"RasterizedStream\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreatePixelShader\",", "Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"DSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"),", "[(ObjPointer(ID3D11HullShader), \"pHullShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"HSSetSamplers\", [(UINT,", "Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), StdMethod(HRESULT, \"CreateClassInstance\", [(LPCSTR, \"pClassTypeName\"), (UINT, \"ConstantBufferOffset\"), (UINT, \"ConstantVectorOffset\"),", "+= [ StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]),", "= Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous = Interface(\"ID3D11Asynchronous\",", "\"BaseTexture\"), (UINT, \"BaseSampler\"), (BOOL, \"Created\"), ]) ID3D11ClassInstance.methods += [ StdMethod(Void,", "\"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), 
StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)),", "\"D3D11_DEVICE_CONTEXT_DEFERRED\", ]) D3D11_CLASS_INSTANCE_DESC = Struct(\"D3D11_CLASS_INSTANCE_DESC\", [ (UINT, \"InstanceId\"), (UINT, \"InstanceIndex\"),", "(D3D11_COMPARISON_FUNC, \"DepthFunc\"), (BOOL, \"StencilEnable\"), (UINT8, \"StencilReadMask\"), (UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"),", "(D3D11_CULL_MODE, \"CullMode\"), (BOOL, \"FrontCounterClockwise\"), (INT, \"DepthBias\"), (FLOAT, \"DepthBiasClamp\"), (FLOAT, \"SlopeScaledDepthBias\"),", "StdMethod(Void, \"SetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\"), (FLOAT, \"MinLOD\")]), StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]),", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSSetShader\", [(ObjPointer(ID3D11PixelShader), \"pPixelShader\"),", "Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "(D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture3D.methods += [", "\"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\", ]) D3D11_FORMAT_SUPPORT2 = Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\",", "(Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSGetShader\", [Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"),", "\"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]), StdMethod(HRESULT, \"CreateDomainShader\",", "\"InstanceIndex\"), (UINT, \"TypeId\"), (UINT, \"ConstantBuffer\"), (UINT, \"BaseConstantBufferOffset\"), (UINT, \"BaseTexture\"), (UINT,", "d3d11 = API(\"d3d11\") d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"),", "\"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\", \"D3D11_BLEND_OP_MAX\", ]) D3D11_COLOR_WRITE_ENABLE = Enum(\"D3D11_COLOR_WRITE_ENABLE\", [", "\"ppConstantBuffers\")]), StdMethod(Void, \"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]), StdMethod(Void, \"IASetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"FeatureSupportDataSize\"), False) ID3D11DeviceContext.methods += [ StdMethod(Void, \"VSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"CullMode\"), (BOOL, \"FrontCounterClockwise\"), (INT, \"DepthBias\"), (FLOAT, \"DepthBiasClamp\"), (FLOAT, \"SlopeScaledDepthBias\"), (BOOL,", "Flags(UINT, [ \"D3D11_BIND_VERTEX_BUFFER\", \"D3D11_BIND_INDEX_BUFFER\", \"D3D11_BIND_CONSTANT_BUFFER\", \"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\",", "StdMethod(Void, \"GetImmediateContext\", 
##########################################################################
#
# Copyright 2012 <NAME>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/

from dxgi import *
from d3dcommon import *

# Forward declarations of the D3D11 interface hierarchy; the method tables
# are filled in below, once the types they reference have been declared.
ID3D11DeviceChild = Interface("ID3D11DeviceChild", IUnknown)
ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild)
ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild)
ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild)
ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild)
ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource)
ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource)
ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource)
ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource)
ID3D11View = Interface("ID3D11View", ID3D11DeviceChild)
ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View)
ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View)
ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View)
ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View)
ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild)
ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild)
ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild)
ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild)
ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild)
ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild)
ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild)
ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild)
ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild)
ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous)
ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query)
ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous)
ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild)
ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild)
ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild)
ID3D11DeviceContext = Interface("ID3D11DeviceContext", ID3D11DeviceChild)
ID3D11Device = Interface("ID3D11Device", IUnknown)
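
# Illustrative sketch, not part of the upstream spec: interfaces in this DSL
# are plain Python objects, declared first so later structs and methods can
# forward-reference them, and their vtables are filled in afterwards with
# `.methods +=`.  "ID3D11Example" and "GetValue" are hypothetical names used
# only to demonstrate the pattern; the object is never registered with the
# API below, so it has no effect on code generation.
ID3D11Example = Interface("ID3D11Example", ID3D11DeviceChild)  # hypothetical
ID3D11Example.methods += [
    # Out(...) marks a parameter that the generated tracer treats as output-only.
    StdMethod(Void, "GetValue", [Out(Pointer(UINT), "pValue")]),  # hypothetical
]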
\"D3D11_COMPARISON_EQUAL\", \"D3D11_COMPARISON_LESS_EQUAL\", \"D3D11_COMPARISON_GREATER\", \"D3D11_COMPARISON_NOT_EQUAL\", \"D3D11_COMPARISON_GREATER_EQUAL\", \"D3D11_COMPARISON_ALWAYS\",", "\"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\", \"D3D11_BIND_DEPTH_STENCIL\", \"D3D11_BIND_UNORDERED_ACCESS\", ]) D3D11_CPU_ACCESS_FLAG = Flags(UINT, [", "\"ppSamplers\")]), StdMethod(Void, \"HSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]),", "StdMethod(Void, \"PSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void,", "\"D3D11_BLEND_INV_SRC1_ALPHA\", ]) D3D11_BLEND_OP = Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\",", "[(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\"), (D3D11_MAP, \"MapType\"), (D3D11_MAP_FLAG, \"MapFlags\"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), \"pMappedResource\")]),", "StdMethod(Void, \"CSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void,", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"ClearState\", []), StdMethod(Void, \"Flush\", []),", "\"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"),", "\"SysMemSlicePitch\"), ]) D3D11_MAPPED_SUBRESOURCE = Struct(\"D3D11_MAPPED_SUBRESOURCE\", [ (OpaquePointer(Void), \"pData\"), (UINT, \"RowPitch\"),", "StdMethod(Void, \"VSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void,", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\",", "Enum(\"D3D11_DEPTH_WRITE_MASK\", [ \"D3D11_DEPTH_WRITE_MASK_ZERO\", \"D3D11_DEPTH_WRITE_MASK_ALL\", ]) D3D11_STENCIL_OP = Enum(\"D3D11_STENCIL_OP\", [ \"D3D11_STENCIL_OP_KEEP\",", "\"Format\"), (D3D11_SRV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_SRV, \"Buffer\"), (D3D11_TEX1D_SRV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_SRV,", "= Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\", [ (UINT64, \"Frequency\"), (BOOL, \"Disjoint\"), ]) D3D11_QUERY_DATA_PIPELINE_STATISTICS =", "\"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFEREX_SRV_FLAG, \"Flags\"), ]) D3D11_TEX1D_SRV = Struct(\"D3D11_TEX1D_SRV\", [", "[]), StdMethod(D3D11_CREATE_DEVICE_FLAG, \"GetCreationFlags\", []), StdMethod(HRESULT, \"GetDeviceRemovedReason\", []), StdMethod(Void, \"GetImmediateContext\", [Out(Pointer(ObjPointer(ID3D11DeviceContext)),", "\"D3D11_CULL_NONE\", \"D3D11_CULL_FRONT\", \"D3D11_CULL_BACK\", ]) D3D11_SO_DECLARATION_ENTRY = Struct(\"D3D11_SO_DECLARATION_ENTRY\", [ (UINT, \"Stream\"),", "Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX: Undocumented functions, called by", "\"GetExceptionMode\", []), ] d3d11 = API(\"d3d11\") d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\", 
[(ObjPointer(IDXGIAdapter),", "StdMethod(Void, \"CSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "\"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"), (UINT, \"NumEntries\"), (Array(Const(UINT),", "= Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild) ID3D11Buffer = Interface(\"ID3D11Buffer\",", "ID3D11InputLayout = Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous =", "(UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX2D_ARRAY_SRV = Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT,", "StdMethod(Void, \"CSSetShader\", [(ObjPointer(ID3D11ComputeShader), \"pComputeShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void,", "(UINT, \"InstanceCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"DrawInstanced\",", "\"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\", [ \"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\",", "(UINT64, \"NumPrimitivesWritten\"), (UINT64, \"PrimitivesStorageNeeded\"), ]) D3D11_COUNTER = Enum(\"D3D11_COUNTER\", [ \"D3D11_COUNTER_DEVICE_DEPENDENT_0\",", "\"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\",", "(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"PSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "(UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION,", "[ (BOOL, \"DepthEnable\"), (D3D11_DEPTH_WRITE_MASK, \"DepthWriteMask\"), (D3D11_COMPARISON_FUNC, \"DepthFunc\"), (BOOL, \"StencilEnable\"), (UINT8,", "\"pDesc\")]), ] D3D11_SUBRESOURCE_DATA = Struct(\"D3D11_SUBRESOURCE_DATA\", [ (OpaquePointer(Const(Void)), \"pSysMem\"), (UINT, \"SysMemPitch\"),", "(BOOL, \"Disjoint\"), ]) D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"), (UINT64,", "[ (DXGI_FORMAT, \"Format\"), (D3D11_DSV_DIMENSION, \"ViewDimension\"), (D3D11_DSV_FLAG, \"Flags\"), (Union(None, [ (D3D11_TEX1D_DSV,", "D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC", "\"VSInvocations\"), (UINT64, \"GSInvocations\"), (UINT64, \"GSPrimitives\"), (UINT64, \"CInvocations\"), (UINT64, \"CPrimitives\"), (UINT64,", "[ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV = Struct(\"D3D11_TEX2D_ARRAY_RTV\", [ (UINT, \"MipSlice\"),", "[Out(Pointer(D3D11_RASTERIZER_DESC), \"pDesc\")]), ] D3D11_SUBRESOURCE_DATA = 
Struct(\"D3D11_SUBRESOURCE_DATA\", [ (OpaquePointer(Const(Void)), \"pSysMem\"), (UINT,", "(UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFEREX_SRV_FLAG, \"Flags\"), ]) D3D11_TEX1D_SRV = Struct(\"D3D11_TEX1D_SRV\",", "(FLOAT, \"Width\"), (FLOAT, \"Height\"), (FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSGetShader\", [Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"),", "Struct(\"D3D11_TEX1D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_RTV = Struct(\"D3D11_TEX1D_ARRAY_RTV\", [ (UINT,", "(Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None), (Union(None, [(UINT, \"NumElements\"), (UINT,", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_UAV = Struct(\"D3D11_TEX3D_UAV\", [ (UINT, \"MipSlice\"),", "\"ArraySize\"), ]) D3D11_TEX3D_SRV = Struct(\"D3D11_TEX3D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"),", "\"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void, \"OMSetBlendState\",", "]) ID3D11BlendState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ] D3D11_RASTERIZER_DESC", "\"Texture2DArray\"), (D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, \"Texture2DMSArray\"), ]), None), ]) ID3D11DepthStencilView.methods +=", "(BOOL, \"DoublePrecisionFloatShaderOps\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT,", "\"MipLODBias\"), (UINT, \"MaxAnisotropy\"), (D3D11_COMPARISON_FUNC, \"ComparisonFunc\"), (Array(FLOAT, 4), \"BorderColor\"), (FLOAT, \"MinLOD\"),", "########################################################################## # # Copyright 2012 <NAME> # All Rights Reserved.", "Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ]) D3D11_RECT = Alias(\"D3D11_RECT\", RECT) D3D11_BOX", "Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"IAGetPrimitiveTopology\", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), \"pTopology\")]), StdMethod(Void, \"VSGetShaderResources\", [(UINT, \"StartSlot\"),", "Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"),", "\"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT,", "\"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void, \"OMSetBlendState\", [(ObjPointer(ID3D11BlendState), \"pBlendState\"), (Array(Const(FLOAT), 4), \"BlendFactor\"),", "\"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, \"Format\"), (UINT, \"SampleCount\"), Out(Pointer(UINT), \"pNumQualityLevels\")]), StdMethod(Void, \"CheckCounterInfo\", [Out(Pointer(D3D11_COUNTER_INFO),", "ID3D11BlendState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ] D3D11_RASTERIZER_DESC =", "(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, 
\"HSGetShader\", [Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\"), Out(Array(ObjPointer(ID3D11ClassInstance),", "\"NumCubes\"), ]) D3D11_TEX2DMS_SRV = Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV", "and associated documentation files (the \"Software\"), to deal # in", "and to permit persons to whom the Software is #", "(ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (Pointer(Const(D3D11_BOX)), \"pSrcBox\")]), StdMethod(Void, \"CopyResource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"),", "\"HSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSGetShader\",", "[(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), \"ppSRView\")]), StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"),", "[(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [", "hereby granted, free of charge, to any person obtaining a", "Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdFunction(HRESULT, \"D3D11CreateDeviceAndSwapChain\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"),", "\"OMSetBlendState\", [(ObjPointer(ID3D11BlendState), \"pBlendState\"), (Array(Const(FLOAT), 4), \"BlendFactor\"), (UINT, \"SampleMask\")]), StdMethod(Void, \"OMSetDepthStencilState\",", "ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader =", "D3D11_MAP_FLAG = Flags(UINT, [ \"D3D11_MAP_FLAG_DO_NOT_WAIT\", ]) D3D11_RAISE_FLAG = Flags(UINT, [", "\"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture3D)), \"ppTexture3D\")]), StdMethod(HRESULT, \"CreateShaderResourceView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)),", "\"pSrcBox\")]), StdMethod(Void, \"CopyResource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\")]), StdMethod(Void, \"UpdateSubresource\", [(ObjPointer(ID3D11Resource),", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "]) ID3D11Texture3D.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE3D_DESC), \"pDesc\")]), ] D3D11_TEXTURECUBE_FACE", "[ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_SRV = Struct(\"D3D11_TEXCUBE_SRV\", [", "\"pActiveCounters\"), Out(LPSTR, \"szName\"), Out(Pointer(UINT), \"pNameLength\"), Out(LPSTR, \"szUnits\"), Out(Pointer(UINT), \"pUnitsLength\"), Out(LPSTR,", "StdMethod(Void, \"DrawAuto\", []), StdMethod(Void, \"DrawIndexedInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void,", "# all copies or substantial portions of the Software. 
#", "\"D3D11_UAV_DIMENSION_UNKNOWN\", \"D3D11_UAV_DIMENSION_BUFFER\", \"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE3D\", ]) D3D11_USAGE =", "(D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture1D.methods", "Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT,", "None), ]) ID3D11DepthStencilView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ]", "StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"DSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"),", "(Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL),", "\"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE3D\", ]) D3D11_USAGE = Enum(\"D3D11_USAGE\", [", "StdMethod(Void, \"IASetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppVertexBuffers\"), (Pointer(Const(UINT)),", "StdMethod(HRESULT, \"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\")]), StdMethod(HRESULT, \"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"),", "Struct(\"D3D11_BUFFER_SRV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None), (Union(None, [(UINT,", "\"MipSlice\"), ]) D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV", "\"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_TEX1D_RTV = Struct(\"D3D11_TEX1D_RTV\", [ (UINT,", "Out(Pointer(SIZE_T), \"pBufferLength\")]), ] ID3D11ClassLinkage.methods += [ StdMethod(HRESULT, \"GetClassInstance\", [(LPCSTR, \"pClassInstanceName\"),", "\"D3D11_STANDARD_MULTISAMPLE_PATTERN\", \"D3D11_CENTER_MULTISAMPLE_PATTERN\", ]) D3D11_DEVICE_CONTEXT_TYPE = Enum(\"D3D11_DEVICE_CONTEXT_TYPE\", [ \"D3D11_DEVICE_CONTEXT_IMMEDIATE\", \"D3D11_DEVICE_CONTEXT_DEFERRED\", ])", "D3D11_BIND_FLAG = Flags(UINT, [ \"D3D11_BIND_VERTEX_BUFFER\", \"D3D11_BIND_INDEX_BUFFER\", \"D3D11_BIND_CONSTANT_BUFFER\", \"D3D11_BIND_SHADER_RESOURCE\", \"D3D11_BIND_STREAM_OUTPUT\", \"D3D11_BIND_RENDER_TARGET\",", "[(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\", [(HANDLE, \"hResource\"), (REFIID, \"ReturnedInterface\"),", "(UINT, \"ConstantVectorOffset\"), (UINT, \"TextureOffset\"), (UINT, \"SamplerOffset\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), ] ID3D11CommandList.methods", "= Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\", [ (BOOL, \"BlendEnable\"), (D3D11_BLEND, \"SrcBlend\"), (D3D11_BLEND, 
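
# Illustrative sketch, not part of the upstream spec: Enum declares a set of
# mutually exclusive constants, whereas Flags wraps an integer type whose
# values are OR'ed together, so tooling can decompose a value into its set
# bits (compare D3D11_USAGE with D3D11_BIND_FLAG above).  The
# "D3D11_EXAMPLE_FLAG_*" values are hypothetical, and the object is unused.
D3D11_EXAMPLE_FLAG = Flags(UINT, [  # hypothetical, illustrative only
    "D3D11_EXAMPLE_FLAG_FOO",
    "D3D11_EXAMPLE_FLAG_BAR",
])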
\"DestBlend\"), (D3D11_BLEND_OP,", "Struct(\"D3D11_MAPPED_SUBRESOURCE\", [ (OpaquePointer(Void), \"pData\"), (UINT, \"RowPitch\"), (UINT, \"DepthPitch\"), ]) ID3D11Resource.methods", "\"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\",", "= Struct(\"D3D11_INPUT_ELEMENT_DESC\", [ (LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (DXGI_FORMAT, \"Format\"), (UINT,", "D3D11_COUNTER_DESC = Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER, \"Counter\"), (UINT, \"MiscFlags\"), ]) D3D11_COUNTER_INFO", "D3D11_BLEND_OP = Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\", \"D3D11_BLEND_OP_MAX\", ])", "Struct(\"D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS\", [ (BOOL, \"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"), ]) D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic(\"D3D11_FEATURE\", \"Feature\",", "D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY = Enum(\"D3D11_QUERY\", [", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH", "Interface(\"ID3D11GeometryShader\", ID3D11DeviceChild) ID3D11PixelShader = Interface(\"ID3D11PixelShader\", ID3D11DeviceChild) ID3D11ComputeShader = Interface(\"ID3D11ComputeShader\", ID3D11DeviceChild)", "\"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"), Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\")]), StdMethod(HRESULT, \"CreateCounter\", [(Pointer(Const(D3D11_COUNTER_DESC)), \"pCounterDesc\"), Out(Pointer(ObjPointer(ID3D11Counter)),", "[ StdMethod(UINT, \"GetContextFlags\", []), ] D3D11_FEATURE_DATA_THREADING = Struct(\"D3D11_FEATURE_DATA_THREADING\", [ (BOOL,", "\"StructureByteStride\"), ]) ID3D11Buffer.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_BUFFER_DESC), \"pDesc\")]), ]", "\"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST\", \"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\", ]) D3D11_FORMAT_SUPPORT2 = Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\",", "(D3D11_INPUT_CLASSIFICATION, \"InputSlotClass\"), (UINT, \"InstanceDataStepRate\"), ]) D3D11_FILL_MODE = Enum(\"D3D11_FILL_MODE\", [ \"D3D11_FILL_WIREFRAME\",", "(BOOL, \"DepthClipEnable\"), (BOOL, \"ScissorEnable\"), (BOOL, \"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods", "the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "[Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), \"pDesc\")]), ] D3D11_TEX1D_DSV = Struct(\"D3D11_TEX1D_DSV\", [ (UINT, \"MipSlice\"), ])", "= Struct(\"D3D11_DEPTH_STENCIL_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_DSV_DIMENSION, \"ViewDimension\"), (D3D11_DSV_FLAG, \"Flags\"), (Union(None,", "\"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\",", "StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"Dispatch\", [(UINT, \"ThreadGroupCountX\"),", "StdMethod(D3D11_CREATE_DEVICE_FLAG, \"GetCreationFlags\", []), StdMethod(HRESULT, \"GetDeviceRemovedReason\", []), StdMethod(Void, \"GetImmediateContext\", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]),", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSSetShader\", [(ObjPointer(ID3D11HullShader),", "[ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"Depth\"), (UINT, \"MipLevels\"), (DXGI_FORMAT,", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"PSSetShaderResources\", [(UINT, \"StartSlot\"),", "(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]), StdMethod(Void,", "(UINT64, \"CPrimitives\"), (UINT64, \"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"), (UINT64, \"CSInvocations\"),", "all copies or substantial portions of the Software. 
# #", "\"D3D11_RTV_DIMENSION_UNKNOWN\", \"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE3D\", ])", "\"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_UAV = Struct(\"D3D11_TEX2D_UAV\", [", "(Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"GetPredication\", [Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\"), Out(Pointer(BOOL), \"pPredicateValue\")]), StdMethod(Void,", "ID3D11DeviceChild) ID3D11Buffer = Interface(\"ID3D11Buffer\", ID3D11Resource) ID3D11Texture1D = Interface(\"ID3D11Texture1D\", ID3D11Resource) ID3D11Texture2D", "\"pDesc\")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [ \"D3D11_STANDARD_MULTISAMPLE_PATTERN\", \"D3D11_CENTER_MULTISAMPLE_PATTERN\", ]) D3D11_DEVICE_CONTEXT_TYPE", "= Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"), (UINT64, \"IAPrimitives\"), (UINT64, \"VSInvocations\"), (UINT64,", "[(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\")]), StdMethod(HRESULT,", "StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]), StdMethod(Void, \"GetInstanceName\",", "\"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX: Undocumented", "[(ObjPointer(ID3D11Resource), \"pResource\"), (FLOAT, \"MinLOD\")]), StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\",", "(FLOAT, \"Height\"), (FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\",", "StdMethod(Void, \"GetInstanceName\", [Out(LPSTR, \"pInstanceName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]), StdMethod(Void, \"GetTypeName\", [Out(LPSTR, \"pTypeName\"),", "[(UINT, \"NumRTVs\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT,", "Out(D3D11_FEATURE_DATA, \"pFeatureSupportData\"), (UINT, \"FeatureSupportDataSize\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT), \"pDataSize\"),", "]) ID3D11Texture2D.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_TEXTURE2D_DESC), \"pDesc\")]), ] D3D11_TEXTURE3D_DESC", "[(UINT, \"ThreadGroupCountX\"), (UINT, \"ThreadGroupCountY\"), (UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"),", "]) D3D11_TEX2D_ARRAY_DSV = Struct(\"D3D11_TEX2D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT,", "Rights Reserved. 
# # Permission is hereby granted, free of", "(UINT, \"TextureOffset\"), (UINT, \"SamplerOffset\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), ] ID3D11CommandList.methods += [", "\"IASetInputLayout\", [(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]), StdMethod(Void, \"IASetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)),", "\"ppSamplers\")]), StdMethod(Void, \"OMSetRenderTargets\", [(UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumViews\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\")]),", "[(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (UINT, \"DstX\"), (UINT, \"DstY\"), (UINT, \"DstZ\"),", "[ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_UNORDERED_ACCESS_VIEW_DESC =", "D3D11_MAP = Enum(\"D3D11_MAP\", [ \"D3D11_MAP_READ\", \"D3D11_MAP_WRITE\", \"D3D11_MAP_READ_WRITE\", \"D3D11_MAP_WRITE_DISCARD\", \"D3D11_MAP_WRITE_NO_OVERWRITE\", ])", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"VSGetConstantBuffers\", [(UINT,", "[ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_RTV =", "\"GSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMGetRenderTargets\",", "D3D11_TEX2DMS_ARRAY_DSV = Struct(\"D3D11_TEX2DMS_ARRAY_DSV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_DSV_FLAG", "Interface(\"ID3D11Query\", ID3D11Asynchronous) ID3D11Predicate = Interface(\"ID3D11Predicate\", ID3D11Query) ID3D11Counter = Interface(\"ID3D11Counter\", ID3D11Asynchronous)", "\"ppBuffer\")]), StdMethod(HRESULT, \"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture1D)), \"ppTexture1D\")]), StdMethod(HRESULT,", "\"ppSamplerState\")]), StdMethod(HRESULT, \"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)), \"ppQuery\")]), StdMethod(HRESULT, \"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)),", "[(ObjPointer(ID3D11CommandList), \"pCommandList\"), (BOOL, \"RestoreContextState\")]), StdMethod(Void, \"HSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"),", "Enum(\"D3D11_DEVICE_CONTEXT_TYPE\", [ \"D3D11_DEVICE_CONTEXT_IMMEDIATE\", \"D3D11_DEVICE_CONTEXT_DEFERRED\", ]) D3D11_CLASS_INSTANCE_DESC = Struct(\"D3D11_CLASS_INSTANCE_DESC\", [ (UINT,", "(D3D11_TEX2D_UAV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV, \"Texture2DArray\"), (D3D11_TEX3D_UAV, \"Texture3D\"), ]), None), ]) ID3D11UnorderedAccessView.methods", "\"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\",", "\"Buffer\"), (D3D11_TEX1D_SRV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_SRV, \"Texture1DArray\"), 
(D3D11_TEX2D_SRV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_SRV, \"Texture2DArray\"), (D3D11_TEX2DMS_SRV,", "= Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2D_ARRAY_RTV = Struct(\"D3D11_TEX2D_ARRAY_RTV\", [", "(D3D11_BUFFER_UAV_FLAG, \"Flags\"), ]) D3D11_TEX1D_UAV = Struct(\"D3D11_TEX1D_UAV\", [ (UINT, \"MipSlice\"), ])", "\"Format\"), (UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"), (D3D11_INPUT_CLASSIFICATION, \"InputSlotClass\"), (UINT, \"InstanceDataStepRate\"), ])", "ID3D11CommandList.methods += [ StdMethod(UINT, \"GetContextFlags\", []), ] D3D11_FEATURE_DATA_THREADING = Struct(\"D3D11_FEATURE_DATA_THREADING\",", "\"SemanticName\"), (UINT, \"SemanticIndex\"), (BYTE, \"StartComponent\"), (BYTE, \"ComponentCount\"), (BYTE, \"OutputSlot\"), ])", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]),", "= Struct(\"D3D11_TEX1D_DSV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_DSV = Struct(\"D3D11_TEX1D_ARRAY_DSV\", [", "to do so, subject to the following conditions: # #", "ID3D11DeviceChild) ID3D11BlendState = Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource", "= Interface(\"ID3D11Predicate\", ID3D11Query) ID3D11Counter = Interface(\"ID3D11Counter\", ID3D11Asynchronous) ID3D11ClassInstance = Interface(\"ID3D11ClassInstance\",", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "= Interface(\"ID3D11ClassInstance\", ID3D11DeviceChild) ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList = Interface(\"ID3D11CommandList\",", "StdMethod(Void, \"CSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "\"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Buffer)), \"ppBuffer\")]), StdMethod(HRESULT, \"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture1D)),", "D3D11_QUERY = Enum(\"D3D11_QUERY\", [ \"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\", \"D3D11_QUERY_PIPELINE_STATISTICS\", \"D3D11_QUERY_OCCLUSION_PREDICATE\",", "\"DSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\",", "(Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)),", "(UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"RSSetState\", [(ObjPointer(ID3D11RasterizerState), \"pRasterizerState\")]), StdMethod(Void, \"RSSetViewports\", [(UINT, \"NumViewports\"),", "\"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE3D\", ]) D3D11_UAV_DIMENSION =", "Enum(\"D3D11_UAV_DIMENSION\", [ \"D3D11_UAV_DIMENSION_UNKNOWN\", 
\"D3D11_UAV_DIMENSION_BUFFER\", \"D3D11_UAV_DIMENSION_TEXTURE1D\", \"D3D11_UAV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE2D\", \"D3D11_UAV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_UAV_DIMENSION_TEXTURE3D\", ])", "]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [ (BOOL, \"DoublePrecisionFloatShaderOps\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT =", "\"ppComputeShader\")]), StdMethod(HRESULT, \"CreateClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(HRESULT, \"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)),", "\"pDesc\"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), \"ppUAView\")]), StdMethod(HRESULT, \"CreateRenderTargetView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)),", "[ \"D3D11_USAGE_DEFAULT\", \"D3D11_USAGE_IMMUTABLE\", \"D3D11_USAGE_DYNAMIC\", \"D3D11_USAGE_STAGING\", ]) D3D11_BIND_FLAG = Flags(UINT, [", "(FLOAT, \"MinLOD\"), (FLOAT, \"MaxLOD\"), ]) ID3D11SamplerState.methods += [ StdMethod(Void, \"GetDesc\",", "\"D3D11_COUNTER_TYPE_UINT64\", ]) D3D11_COUNTER_DESC = Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER, \"Counter\"), (UINT, \"MiscFlags\"),", "[(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppSOTargets\")]), StdMethod(Void, \"RSGetState\", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(Void,", "[(D3D11_RAISE_FLAG, \"RaiseFlags\")]), StdMethod(UINT, \"GetExceptionMode\", []), ] d3d11 = API(\"d3d11\") d3d11.addFunctions([", "\"Width\"), (UINT, \"Height\"), (UINT, \"Depth\"), (UINT, \"MipLevels\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE,", "= Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\", [", "\"Width\"), (UINT, \"Height\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT, \"Format\"), (DXGI_SAMPLE_DESC,", "D3D11_FORMAT_SUPPORT2 = Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX\",", "\"TypeId\"), (UINT, \"ConstantBuffer\"), (UINT, \"BaseConstantBufferOffset\"), (UINT, \"BaseTexture\"), (UINT, \"BaseSampler\"), (BOOL,", "\"OutFormatSupport\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT2\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT2, \"OutFormatSupport2\"),", "StdMethod(Void, \"GSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void,", "(UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"OMGetBlendState\", [Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\"), Out(Array(FLOAT,", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "(UINT, \"NumClassInstances\")]), StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT, \"StartSlot\"),", "\"DepthBiasClamp\"), (FLOAT, \"SlopeScaledDepthBias\"), (BOOL, \"DepthClipEnable\"), (BOOL, \"ScissorEnable\"), (BOOL, 
\"MultisampleEnable\"), (BOOL,", "D3D11_QUERY_MISC_FLAG = Flags(UINT, [ \"D3D11_QUERY_MISC_PREDICATEHINT\", ]) D3D11_QUERY_DESC = Struct(\"D3D11_QUERY_DESC\", [", "= Flags(UINT, [ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY = Enum(\"D3D11_QUERY\", [ \"D3D11_QUERY_EVENT\",", "\"RSSetScissorRects\", [(UINT, \"NumRects\"), (Array(Const(D3D11_RECT), \"NumRects\"), \"pRects\")]), StdMethod(Void, \"CopySubresourceRegion\", [(ObjPointer(ID3D11Resource), \"pDstResource\"),", "(REFIID, \"riid\"), Out(Pointer(ObjPointer(Void)), \"ppvObj\")], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateDevice\", [DWORD, DWORD, DWORD,", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"DSSetShaderResources\", [(UINT,", "\"HSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\",", "\"D3D11_DEPTH_WRITE_MASK_ALL\", ]) D3D11_STENCIL_OP = Enum(\"D3D11_STENCIL_OP\", [ \"D3D11_STENCIL_OP_KEEP\", \"D3D11_STENCIL_OP_ZERO\", \"D3D11_STENCIL_OP_REPLACE\", \"D3D11_STENCIL_OP_INCR_SAT\",", "(BOOL, \"IndependentBlendEnable\"), (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), \"RenderTarget\"), ]) ID3D11BlendState.methods += [ StdMethod(Void,", "USE OR OTHER DEALINGS IN # THE SOFTWARE. # ##########################################################################/", "\"D3D11_RESOURCE_DIMENSION_TEXTURE3D\", ]) D3D11_SRV_DIMENSION = Enum(\"D3D11_SRV_DIMENSION\", [ \"D3D11_SRV_DIMENSION_UNKNOWN\", \"D3D11_SRV_DIMENSION_BUFFER\", \"D3D11_SRV_DIMENSION_TEXTURE1D\", \"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\",", "\"ppDevice\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT), \"pDataSize\"), Out(OpaquePointer(Void), \"pData\")]), StdMethod(HRESULT,", "\"Software\"), (D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"),", "Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"DSGetSamplers\", [(UINT, \"StartSlot\"), (UINT,", "\"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"RSSetState\", [(ObjPointer(ID3D11RasterizerState), \"pRasterizerState\")]), StdMethod(Void, \"RSSetViewports\", [(UINT,", "\"LastDeviceDependentCounter\"), (UINT, \"NumSimultaneousCounters\"), (UINT8, \"NumDetectableParallelUnits\"), ]) ID3D11Counter.methods += [ StdMethod(Void,", "[ (UINT, \"MipSlice\"), ]) D3D11_TEX2DMS_RTV = Struct(\"D3D11_TEX2DMS_RTV\", [ (UINT, \"UnusedField_NothingToDefine\"),", "[(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (Pointer(Const(D3D11_BOX)), \"pDstBox\"), (OpaquePointer(Const(Void)), \"pSrcData\"), (UINT, \"SrcRowPitch\"),", "StdMethod(Void, \"GSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "\"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]),", "(D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_DSV, \"Texture2DArray\"), (D3D11_TEX2DMS_DSV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_DSV, 
\"Texture2DMSArray\"),", "\"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST\",", "\"pResource\"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"),", "Out(Pointer(D3D11_MAPPED_SUBRESOURCE), \"pMappedResource\")]), StdMethod(Void, \"Unmap\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\")]), StdMethod(Void, \"PSSetConstantBuffers\",", "\"pBufferStrides\"), (UINT, \"NumStrides\"), (UINT, \"RasterizedStream\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT,", "\"D3D11_BLEND_SRC1_COLOR\", \"D3D11_BLEND_INV_SRC1_COLOR\", \"D3D11_BLEND_SRC1_ALPHA\", \"D3D11_BLEND_INV_SRC1_ALPHA\", ]) D3D11_BLEND_OP = Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\",", "[Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_UAV_FLAG = Flags(UINT, [ \"D3D11_BUFFER_UAV_FLAG_RAW\", \"D3D11_BUFFER_UAV_FLAG_APPEND\", \"D3D11_BUFFER_UAV_FLAG_COUNTER\",", "\"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_UAV, \"Buffer\"), (D3D11_TEX1D_UAV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV,", "\"D3D11_DSV_DIMENSION_UNKNOWN\", \"D3D11_DSV_DIMENSION_TEXTURE1D\", \"D3D11_DSV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2D\", \"D3D11_DSV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_DSV_DIMENSION_TEXTURE2DMS\", \"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY\", ]) D3D11_RTV_DIMENSION =", "[(DXGI_FORMAT, \"Format\"), Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]), StdMethod(HRESULT, \"CheckMultisampleQualityLevels\", [(DXGI_FORMAT, \"Format\"), (UINT, \"SampleCount\"),", "\"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\", [(HANDLE, \"hResource\"), (REFIID,", "\"pDataSize\"), Out(OpaquePointer(Void), \"pData\")]), StdMethod(HRESULT, \"SetPrivateData\", [(REFGUID, \"guid\"), (UINT, \"DataSize\"), (OpaqueBlob(Const(Void),", "StdMethod(Void, \"Draw\", [(UINT, \"VertexCount\"), (UINT, \"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\", [(ObjPointer(ID3D11Resource), \"pResource\"),", "\"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_COMPARISON_ANISOTROPIC\", ]) D3D11_FILTER_TYPE = Enum(\"D3D11_FILTER_TYPE\",", "(D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture1D.methods += [ StdMethod(Void, \"GetDesc\",", "\"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"VSGetConstantBuffers\", 
[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "and/or sell # copies of the Software, and to permit", "StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT), \"pDataSize\"), Out(OpaquePointer(Void), \"pData\")]), StdMethod(HRESULT, \"SetPrivateData\",", "\"PrimitivesStorageNeeded\"), ]) D3D11_COUNTER = Enum(\"D3D11_COUNTER\", [ \"D3D11_COUNTER_DEVICE_DEPENDENT_0\", ]) D3D11_COUNTER_TYPE =", "\"D3D11_FORMAT_SUPPORT_SO_BUFFER\", \"D3D11_FORMAT_SUPPORT_TEXTURE1D\", \"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\", \"D3D11_FORMAT_SUPPORT_MIP\",", "Flags(UINT, [ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG = Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\",", "ID3D11ClassInstance.methods += [ StdMethod(Void, \"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC),", "\"right\"), (UINT, \"bottom\"), (UINT, \"back\"), ]) ID3D11DeviceChild.methods += [ StdMethod(Void,", "\"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\")]), StdMethod(HRESULT, \"CreateClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(HRESULT, \"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)),", "[ \"D3D11_COLOR_WRITE_ENABLE_ALL\", \"D3D11_COLOR_WRITE_ENABLE_RED\", \"D3D11_COLOR_WRITE_ENABLE_GREEN\", \"D3D11_COLOR_WRITE_ENABLE_BLUE\", \"D3D11_COLOR_WRITE_ENABLE_ALPHA\", ]) D3D11_RENDER_TARGET_BLEND_DESC = Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\",", "\"NumClassInstances\")]), StdMethod(Void, \"DSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]),", "\"FeatureLevels\"), (UINT, \"SDKVersion\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), StdFunction(HRESULT,", "Struct(\"D3D11_BUFFER_UAV\", [ (UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFER_UAV_FLAG, \"Flags\"), ]) D3D11_TEX1D_UAV", "Struct(\"D3D11_TEXTURE3D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"), (UINT, \"Depth\"), (UINT, \"MipLevels\"),", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSGetShader\", [Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT),", "Struct(\"D3D11_DEPTH_STENCIL_DESC\", [ (BOOL, \"DepthEnable\"), (D3D11_DEPTH_WRITE_MASK, \"DepthWriteMask\"), (D3D11_COMPARISON_FUNC, \"DepthFunc\"), (BOOL, \"StencilEnable\"),", "\"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\", ]) D3D11_PRIMITIVE = Enum(\"D3D11_PRIMITIVE\", [ \"D3D11_PRIMITIVE_UNDEFINED\",", "\"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST\",", "[(ObjPointer(ID3D11Asynchronous), \"pAsync\"), Out(OpaqueBlob(Void, \"DataSize\"), \"pData\"), (UINT, \"DataSize\"), (D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]), StdMethod(Void,", "\"D3D11_CREATE_DEVICE_DEBUG\", \"D3D11_CREATE_DEVICE_SWITCH_TO_REF\", \"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\", \"D3D11_CREATE_DEVICE_BGRA_SUPPORT\", ]) ID3D11Device.methods += [ StdMethod(HRESULT, \"CreateBuffer\",", "\"CreateDomainShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\")]),", "= Flags(UINT, [ \"D3D11_RESOURCE_MISC_GENERATE_MIPS\", \"D3D11_RESOURCE_MISC_SHARED\", \"D3D11_RESOURCE_MISC_TEXTURECUBE\", \"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS\", \"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS\", \"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED\", \"D3D11_RESOURCE_MISC_RESOURCE_CLAMP\",", "(Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)),", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"VSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"),", "\"Depth\"), (UINT, \"MipLevels\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG,", "[Out(LPSTR, \"pTypeName\"), Out(Pointer(SIZE_T), \"pBufferLength\")]), ] ID3D11ClassLinkage.methods += [ StdMethod(HRESULT, \"GetClassInstance\",", "\"D3D11_FORMAT_SUPPORT_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_SO_BUFFER\", \"D3D11_FORMAT_SUPPORT_TEXTURE1D\", \"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\",", "\"D3D11_BLEND_SRC_COLOR\", \"D3D11_BLEND_INV_SRC_COLOR\", \"D3D11_BLEND_SRC_ALPHA\", \"D3D11_BLEND_INV_SRC_ALPHA\", \"D3D11_BLEND_DEST_ALPHA\", \"D3D11_BLEND_INV_DEST_ALPHA\", \"D3D11_BLEND_DEST_COLOR\", \"D3D11_BLEND_INV_DEST_COLOR\", \"D3D11_BLEND_SRC_ALPHA_SAT\", \"D3D11_BLEND_BLEND_FACTOR\",", "ID3D11Device = Interface(\"ID3D11Device\", IUnknown) D3D11_INPUT_CLASSIFICATION = Enum(\"D3D11_INPUT_CLASSIFICATION\", [ \"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\",", "[(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"),", "\"SrcDepthPitch\")]), StdMethod(Void, \"CopyStructureCount\", [(ObjPointer(ID3D11Buffer), \"pDstBuffer\"), (UINT, \"DstAlignedByteOffset\"), (ObjPointer(ID3D11UnorderedAccessView), \"pSrcView\")]), StdMethod(Void,", "[ (BOOL, \"BlendEnable\"), (D3D11_BLEND, \"SrcBlend\"), (D3D11_BLEND, \"DestBlend\"), (D3D11_BLEND_OP, \"BlendOp\"), (D3D11_BLEND,", "(D3D11_BLEND, \"SrcBlendAlpha\"), (D3D11_BLEND, \"DestBlendAlpha\"), (D3D11_BLEND_OP, \"BlendOpAlpha\"), (UINT8, \"RenderTargetWriteMask\"), ]) 
D3D11_BLEND_DESC", "\"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture3D.methods += [ StdMethod(Void,", "(UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT,", "Struct(\"D3D11_TEXCUBE_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"First2DArrayFace\"), (UINT, \"NumCubes\"),", "= Alias(\"D3D11_RECT\", RECT) D3D11_BOX = Struct(\"D3D11_BOX\", [ (UINT, \"left\"), (UINT,", "] D3D11_RASTERIZER_DESC = Struct(\"D3D11_RASTERIZER_DESC\", [ (D3D11_FILL_MODE, \"FillMode\"), (D3D11_CULL_MODE, \"CullMode\"), (BOOL,", "Enum(\"D3D11_TEXTURECUBE_FACE\", [ \"D3D11_TEXTURECUBE_FACE_POSITIVE_X\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_X\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Y\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y\", \"D3D11_TEXTURECUBE_FACE_POSITIVE_Z\", \"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ]) ID3D11View.methods", "[(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), \"ppRTView\")]), StdMethod(HRESULT, \"CreateDepthStencilView\", [(ObjPointer(ID3D11Resource), \"pResource\"),", "\"Height\"), (UINT, \"MipLevels\"), (UINT, \"ArraySize\"), (DXGI_FORMAT, \"Format\"), (DXGI_SAMPLE_DESC, \"SampleDesc\"), (D3D11_USAGE,", "\"Texture3D\"), ]), None), ]) ID3D11RenderTargetView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC),", "D3D11_TEXTURE_ADDRESS_MODE = Enum(\"D3D11_TEXTURE_ADDRESS_MODE\", [ \"D3D11_TEXTURE_ADDRESS_WRAP\", \"D3D11_TEXTURE_ADDRESS_MIRROR\", \"D3D11_TEXTURE_ADDRESS_CLAMP\", \"D3D11_TEXTURE_ADDRESS_BORDER\", \"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE\", ])", "StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"), (UINT,", "[ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_UAV = Struct(\"D3D11_TEX1D_ARRAY_UAV\", [ (UINT, \"MipSlice\"),", "ID3D11DeviceChild) ID3D11Device = Interface(\"ID3D11Device\", IUnknown) D3D11_INPUT_CLASSIFICATION = Enum(\"D3D11_INPUT_CLASSIFICATION\", [ \"D3D11_INPUT_PER_VERTEX_DATA\",", "internal=True), StdFunction(SIZE_T, \"D3D11CoreGetLayeredDeviceSize\", [LPCVOID, DWORD], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateLayeredDevice\", [LPCVOID, DWORD,", "\"D3D11_COMPARISON_NEVER\", \"D3D11_COMPARISON_LESS\", \"D3D11_COMPARISON_EQUAL\", \"D3D11_COMPARISON_LESS_EQUAL\", \"D3D11_COMPARISON_GREATER\", \"D3D11_COMPARISON_NOT_EQUAL\", \"D3D11_COMPARISON_GREATER_EQUAL\", \"D3D11_COMPARISON_ALWAYS\", ]) D3D11_DEPTH_WRITE_MASK", "\"CSInvocations\"), ]) D3D11_QUERY_DATA_SO_STATISTICS = Struct(\"D3D11_QUERY_DATA_SO_STATISTICS\", [ (UINT64, \"NumPrimitivesWritten\"), (UINT64, \"PrimitivesStorageNeeded\"),", "+= [ StdMethod(Void, \"GetResource\", [Out(Pointer(ObjPointer(ID3D11Resource)), \"ppResource\")]), ] D3D11_BUFFER_SRV = Struct(\"D3D11_BUFFER_SRV\",", "ID3D11Asynchronous.methods += [ StdMethod(UINT, \"GetDataSize\", []), ] D3D11_ASYNC_GETDATA_FLAG = Flags(UINT,", "[ \"D3D11_RTV_DIMENSION_UNKNOWN\", \"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE3D\",", 
"(UINT8, \"StencilReadMask\"), (UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"), (D3D11_DEPTH_STENCILOP_DESC, \"BackFace\"), ]) ID3D11DepthStencilState.methods", "D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [ \"D3D11_APPEND_ALIGNED_ELEMENT\", ]) D3D11_INPUT_ELEMENT_DESC = Struct(\"D3D11_INPUT_ELEMENT_DESC\", [", "(UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSSetShader\", [(ObjPointer(ID3D11VertexShader), \"pVertexShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)),", "\"CPrimitives\"), (UINT64, \"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"), (UINT64, \"CSInvocations\"), ])", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSSetConstantBuffers\", [(UINT,", "[ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_RTV = Struct(\"D3D11_TEX1D_ARRAY_RTV\", [ (UINT, \"MipSlice\"),", "\"MinLOD\"), (FLOAT, \"MaxLOD\"), ]) ID3D11SamplerState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SAMPLER_DESC),", "\"pNumClassInstances\")]), StdMethod(Void, \"DSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]),", "(Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture2D)), \"ppTexture2D\")]), StdMethod(HRESULT, \"CreateTexture3D\", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"),", "\"Flags\"), ]) D3D11_TEX1D_UAV = Struct(\"D3D11_TEX1D_UAV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX1D_ARRAY_UAV", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppVertexBuffers\"), (Pointer(Const(UINT)), \"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]),", "[ StdMethod(HRESULT, \"GetClassInstance\", [(LPCSTR, \"pClassInstanceName\"), (UINT, \"InstanceIndex\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), StdMethod(HRESULT,", "\"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\",", "\"CSGetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"CSGetShader\",", "\"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST\",", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "\"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"), (UINT, 
\"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]),", "Out(Pointer(UINT), \"pStencilRef\")]), StdMethod(Void, \"SOGetTargets\", [(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppSOTargets\")]), StdMethod(Void,", "ID3D11Asynchronous) ID3D11ClassInstance = Interface(\"ID3D11ClassInstance\", ID3D11DeviceChild) ID3D11ClassLinkage = Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList", "\"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]), StdMethod(Void, \"GetInstanceName\", [Out(LPSTR, \"pInstanceName\"), Out(Pointer(SIZE_T),", "\"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFER_UAV_FLAG, \"Flags\"), ]) D3D11_TEX1D_UAV = Struct(\"D3D11_TEX1D_UAV\", [", "\"D3D11_BLEND_INV_SRC_COLOR\", \"D3D11_BLEND_SRC_ALPHA\", \"D3D11_BLEND_INV_SRC_ALPHA\", \"D3D11_BLEND_DEST_ALPHA\", \"D3D11_BLEND_INV_DEST_ALPHA\", \"D3D11_BLEND_DEST_COLOR\", \"D3D11_BLEND_INV_DEST_COLOR\", \"D3D11_BLEND_SRC_ALPHA_SAT\", \"D3D11_BLEND_BLEND_FACTOR\", \"D3D11_BLEND_INV_BLEND_FACTOR\",", "\"D3D11_CULL_BACK\", ]) D3D11_SO_DECLARATION_ENTRY = Struct(\"D3D11_SO_DECLARATION_ENTRY\", [ (UINT, \"Stream\"), (LPCSTR, \"SemanticName\"),", "\"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSSetShaderResources\",", "None), (Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ]) D3D11_BUFFEREX_SRV_FLAG =", "\"D3D11_FORMAT_SUPPORT_MIP\", \"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET\",", "(UINT8, \"NumDetectableParallelUnits\"), ]) ID3D11Counter.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]),", "\"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]),", "\"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV, \"Texture2DArray\"), (D3D11_TEX3D_UAV, \"Texture3D\"), ]), None), ]) ID3D11UnorderedAccessView.methods +=", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"),", "\"TopLeftX\"), (FLOAT, \"TopLeftY\"), (FLOAT, \"Width\"), (FLOAT, \"Height\"), (FLOAT, \"MinDepth\"), (FLOAT,", "ID3D11Resource) ID3D11Texture2D = Interface(\"ID3D11Texture2D\", ID3D11Resource) ID3D11Texture3D = Interface(\"ID3D11Texture3D\", ID3D11Resource) ID3D11View", "(UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"), (UINT64, \"CSInvocations\"), ]) D3D11_QUERY_DATA_SO_STATISTICS = Struct(\"D3D11_QUERY_DATA_SO_STATISTICS\",", "(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppVertexBuffers\"), Out(Pointer(UINT), \"pStrides\"), Out(Pointer(UINT), \"pOffsets\")]), StdMethod(Void, \"IAGetIndexBuffer\", [Out(Pointer(ObjPointer(ID3D11Buffer)),", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSGetShader\", 
D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
    "D3D11_TEXTURECUBE_FACE_POSITIVE_X",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])

ID3D11View.methods += [
    StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]

D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])

D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [
    "D3D11_BUFFEREX_SRV_FLAG_RAW",
])

D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
    (D3D11_BUFFEREX_SRV_FLAG, "Flags"),
])

D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "First2DArrayFace"),
    (UINT, "NumCubes"),
])

D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
    "D3D11_SRV_DIMENSION_UNKNOWN",
    "D3D11_SRV_DIMENSION_BUFFER",
    "D3D11_SRV_DIMENSION_TEXTURE1D",
    "D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2D",
    "D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2DMS",
    "D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE3D",
    "D3D11_SRV_DIMENSION_TEXTURECUBE",
    "D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
    "D3D11_SRV_DIMENSION_BUFFEREX",
])

D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_SRV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_SRV, "Buffer"),
        (D3D11_TEX1D_SRV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
        (D3D11_TEX2D_SRV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
        (D3D11_TEX2DMS_SRV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
        (D3D11_TEX3D_SRV, "Texture3D"),
        (D3D11_TEXCUBE_SRV, "TextureCube"),
        (D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
        (D3D11_BUFFEREX_SRV, "BufferEx"),
    ]), None),
])

ID3D11ShaderResourceView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]),
]

D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
    "D3D11_RTV_DIMENSION_UNKNOWN",
    "D3D11_RTV_DIMENSION_BUFFER",
    "D3D11_RTV_DIMENSION_TEXTURE1D",
    "D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2D",
    "D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2DMS",
    "D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE3D",
])

D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
    (Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
    (Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])

D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_RTV_DIMENSION, "ViewDimension"),
    (Union(None, [
        (D3D11_BUFFER_RTV, "Buffer"),
        (D3D11_TEX1D_RTV, "Texture1D"),
        (D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
        (D3D11_TEX2D_RTV, "Texture2D"),
        (D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
        (D3D11_TEX2DMS_RTV, "Texture2DMS"),
        (D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
        (D3D11_TEX3D_RTV, "Texture3D"),
    ]), None),
])

ID3D11RenderTargetView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]
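# The (Union(None, [...]), None) entries model the anonymous C union inside the
# view descriptions; which arm is live is selected by ViewDimension.  An
# illustrative C fragment:
#
#     D3D11_SHADER_RESOURCE_VIEW_DESC desc = {};
#     desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
#     desc.Texture2D.MostDetailedMip = 0;   // the Texture2D arm is active
#     desc.Texture2D.MipLevels = 1;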
\"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\",", "\"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), \"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)),", "= Enum(\"D3D11_MAP\", [ \"D3D11_MAP_READ\", \"D3D11_MAP_WRITE\", \"D3D11_MAP_READ_WRITE\", \"D3D11_MAP_WRITE_DISCARD\", \"D3D11_MAP_WRITE_NO_OVERWRITE\", ]) D3D11_MAP_FLAG", "= Struct(\"D3D11_BLEND_DESC\", [ (BOOL, \"AlphaToCoverageEnable\"), (BOOL, \"IndependentBlendEnable\"), (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), \"RenderTarget\"),", "D3D11_RECT = Alias(\"D3D11_RECT\", RECT) D3D11_BOX = Struct(\"D3D11_BOX\", [ (UINT, \"left\"),", "\"pSrcView\")]), StdMethod(Void, \"ClearRenderTargetView\", [(ObjPointer(ID3D11RenderTargetView), \"pRenderTargetView\"), (Array(Const(FLOAT), 4), \"ColorRGBA\")]), StdMethod(Void, \"ClearUnorderedAccessViewUint\",", "(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"DSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView),", "]) ID3D11Resource.methods += [ StdMethod(Void, \"GetType\", [Out(Pointer(D3D11_RESOURCE_DIMENSION), \"pResourceDimension\")]), StdMethod(Void, \"SetEvictionPriority\",", "= Enum(\"D3D11_DEPTH_WRITE_MASK\", [ \"D3D11_DEPTH_WRITE_MASK_ZERO\", \"D3D11_DEPTH_WRITE_MASK_ALL\", ]) D3D11_STENCIL_OP = Enum(\"D3D11_STENCIL_OP\", [", "(D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"), (FLOAT, \"MipLODBias\"), (UINT, \"MaxAnisotropy\"), (D3D11_COMPARISON_FUNC, \"ComparisonFunc\"),", "\"Height\"), (FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [", "ID3D11DepthStencilState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ] D3D11_BLEND =", "\"InstanceIndex\"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), StdMethod(HRESULT, \"CreateClassInstance\", [(LPCSTR, \"pClassTypeName\"), (UINT, \"ConstantBufferOffset\"), (UINT,", "\"D3D11_DEVICE_CONTEXT_IMMEDIATE\", \"D3D11_DEVICE_CONTEXT_DEFERRED\", ]) D3D11_CLASS_INSTANCE_DESC = Struct(\"D3D11_CLASS_INSTANCE_DESC\", [ (UINT, \"InstanceId\"), (UINT,", "\"HSSetShader\", [(ObjPointer(ID3D11HullShader), \"pHullShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"HSSetSamplers\",", "(UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\",", "ID3D11BlendState = Interface(\"ID3D11BlendState\", ID3D11DeviceChild) ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource =", "ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild) ID3D11Buffer = Interface(\"ID3D11Buffer\", ID3D11Resource) ID3D11Texture1D", "(LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (DXGI_FORMAT, \"Format\"), (UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"),", "[(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppSOTargets\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void, \"DrawAuto\", []),", "\"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER\", \"D3D11_FORMAT_SUPPORT_SO_BUFFER\", \"D3D11_FORMAT_SUPPORT_TEXTURE1D\", \"D3D11_FORMAT_SUPPORT_TEXTURE2D\", \"D3D11_FORMAT_SUPPORT_TEXTURE3D\", \"D3D11_FORMAT_SUPPORT_TEXTURECUBE\", \"D3D11_FORMAT_SUPPORT_SHADER_LOAD\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON\", \"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT\",", "[Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT), \"pDataSize\"), Out(OpaquePointer(Void), \"pData\")]),", "\"WSize\"), ]) D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION, \"ViewDimension\"),", "\"MipSlice\"), ]) D3D11_TEX2D_ARRAY_DSV = Struct(\"D3D11_TEX2D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"),", "]) D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT, \"OutFormatSupport\"), ])", "[(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"GSSetConstantBuffers\",", "\"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]),", "\"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE\", ]) ID3D11Asynchronous.methods += [ StdMethod(UINT, \"GetDataSize\", []), ] D3D11_ASYNC_GETDATA_FLAG", "= Struct(\"D3D11_TEX2DMS_SRV\", [ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [", "(UINT, \"ElementOffset\")]), None), (Union(None, [(UINT, \"NumElements\"), (UINT, \"ElementWidth\")]), None), ])", "(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"ClearState\", []), StdMethod(Void, \"Flush\", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE,", "[Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(HRESULT, \"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), StdMethod(HRESULT, \"CreateDepthStencilState\",", "[Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]), StdMethod(Void, \"GetInstanceName\", [Out(LPSTR, \"pInstanceName\"),", "Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), # XXX: Undocumented functions, called by d3d11sdklayers.dll when", "(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\")]), 
StdMethod(Void, \"OMSetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), \"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView),", "\"NumViews\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumViews\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"),", "\"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture1D)), \"ppTexture1D\")]), StdMethod(HRESULT, \"CreateTexture2D\", [(Pointer(Const(D3D11_TEXTURE2D_DESC)),", "(INT, \"BaseVertexLocation\")]), StdMethod(Void, \"Draw\", [(UINT, \"VertexCount\"), (UINT, \"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\",", "\"ArraySize\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG,", "Struct(\"D3D11_QUERY_DESC\", [ (D3D11_QUERY, \"Query\"), (D3D11_QUERY_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Query.methods += [", "\"HSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"DSGetShaderResources\",", "ID3D11DeviceChild) ID3D11RasterizerState = Interface(\"ID3D11RasterizerState\", ID3D11DeviceChild) ID3D11Resource = Interface(\"ID3D11Resource\", ID3D11DeviceChild) ID3D11Buffer", "(UINT, \"bottom\"), (UINT, \"back\"), ]) ID3D11DeviceChild.methods += [ StdMethod(Void, \"GetDevice\",", "(UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEX1D_ARRAY_SRV = Struct(\"D3D11_TEX1D_ARRAY_SRV\", [ (UINT,", "the Software, and to permit persons to whom the Software", "\"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture1D.methods += [ StdMethod(Void,", "\"Unmap\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\")]), StdMethod(Void, \"PSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\",", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"VSSetSamplers\", [(UINT,", "[(UINT, \"NumViews\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumViews\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT,", "= Struct(\"D3D11_TEX3D_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ])", "[ (DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_RTV, \"Buffer\"), (D3D11_TEX1D_RTV,", "\"D3D11_BUFFER_UAV_FLAG_RAW\", \"D3D11_BUFFER_UAV_FLAG_APPEND\", \"D3D11_BUFFER_UAV_FLAG_COUNTER\", ]) D3D11_BUFFER_UAV = Struct(\"D3D11_BUFFER_UAV\", [ (UINT, \"FirstElement\"),", "]) D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct(\"D3D11_UNORDERED_ACCESS_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_UAV_DIMENSION, \"ViewDimension\"), (Union(None,", "[ (UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFER_UAV_FLAG, \"Flags\"), ]) 
D3D11_TEX1D_UAV =", "StdMethod(Void, \"VSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "\"DSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSGetShader\",", "\"ArraySize\"), ]) D3D11_TEX3D_RTV = Struct(\"D3D11_TEX3D_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"),", "\"GetClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), \"pDesc\")]), StdMethod(Void, \"GetInstanceName\", [Out(LPSTR,", "= Struct(\"D3D11_BUFFER_SRV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT, \"ElementOffset\")]), None), (Union(None,", "Out(Array(D3D11_VIEWPORT, \"*pNumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSGetScissorRects\", [Out(Pointer(UINT), \"pNumRects\"), Out(Array(D3D11_RECT, \"*pNumRects\"), \"pRects\")]),", "D3D11_MAPPED_SUBRESOURCE = Struct(\"D3D11_MAPPED_SUBRESOURCE\", [ (OpaquePointer(Void), \"pData\"), (UINT, \"RowPitch\"), (UINT, \"DepthPitch\"),", "\"Texture3D\"), ]), None), ]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC),", "\"MinDepth\"), (FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\",", "(D3D11_COUNTER, \"Counter\"), (UINT, \"MiscFlags\"), ]) D3D11_COUNTER_INFO = Struct(\"D3D11_COUNTER_INFO\", [ (D3D11_COUNTER,", "StdMethod(Void, \"DSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "[ \"D3D11_COUNTER_TYPE_FLOAT32\", \"D3D11_COUNTER_TYPE_UINT16\", \"D3D11_COUNTER_TYPE_UINT32\", \"D3D11_COUNTER_TYPE_UINT64\", ]) D3D11_COUNTER_DESC = Struct(\"D3D11_COUNTER_DESC\", [", "\"ClearUnorderedAccessViewFloat\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(FLOAT), 4), \"Values\")]), StdMethod(Void, \"ClearDepthStencilView\", [(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"),", "Out(Pointer(ObjPointer(ID3D11DepthStencilState)), \"ppDepthStencilState\")]), StdMethod(HRESULT, \"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(HRESULT, \"CreateSamplerState\",", "Enum(\"D3D11_INPUT_CLASSIFICATION\", [ \"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\", ]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [ \"D3D11_APPEND_ALIGNED_ELEMENT\",", "[ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE\", ])", "StdFunction(HRESULT, \"D3D11CoreCreateLayeredDevice\", [LPCVOID, DWORD, LPCVOID, (REFIID, \"riid\"), Out(Pointer(ObjPointer(Void)), \"ppvObj\")], internal=True),", "= Enum(\"D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS\", [ \"D3D11_STANDARD_MULTISAMPLE_PATTERN\", 
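# Illustrative C fragment for the sampler description defined above (the
# variable names are hypothetical):
#
#     D3D11_SAMPLER_DESC sd = {};
#     sd.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
#     sd.AddressU = sd.AddressV = sd.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
#     sd.MaxLOD = D3D11_FLOAT32_MAX;
#     hr = pDevice->CreateSamplerState(&sd, &pSampler);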
\"D3D11_CENTER_MULTISAMPLE_PATTERN\", ]) D3D11_DEVICE_CONTEXT_TYPE = Enum(\"D3D11_DEVICE_CONTEXT_TYPE\", [", "\"DataSize\"), \"pData\")]), StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"), (OpaquePointer(Const(IUnknown)), \"pData\")]), ] D3D11_COMPARISON_FUNC", "D3D11_BLEND = Enum(\"D3D11_BLEND\", [ \"D3D11_BLEND_ZERO\", \"D3D11_BLEND_ONE\", \"D3D11_BLEND_SRC_COLOR\", \"D3D11_BLEND_INV_SRC_COLOR\", \"D3D11_BLEND_SRC_ALPHA\", \"D3D11_BLEND_INV_SRC_ALPHA\",", "\"FrontCounterClockwise\"), (INT, \"DepthBias\"), (FLOAT, \"DepthBiasClamp\"), (FLOAT, \"SlopeScaledDepthBias\"), (BOOL, \"DepthClipEnable\"), (BOOL,", "\"pSrcData\"), (UINT, \"SrcRowPitch\"), (UINT, \"SrcDepthPitch\")]), StdMethod(Void, \"CopyStructureCount\", [(ObjPointer(ID3D11Buffer), \"pDstBuffer\"), (UINT,", "\"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"), ]) D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct(\"D3D11_UNORDERED_ACCESS_VIEW_DESC\", [", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\", [(UINT,", "(Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer),", "= Flags(UINT, [ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG = Flags(UINT, [ \"D3D11_CLEAR_DEPTH\",", "(Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void,", "+= [ StdMethod(HRESULT, \"CreateBuffer\", [(Pointer(Const(D3D11_BUFFER_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Buffer)), \"ppBuffer\")]),", "Flags(UINT, [ \"D3D11_BUFFER_UAV_FLAG_RAW\", \"D3D11_BUFFER_UAV_FLAG_APPEND\", \"D3D11_BUFFER_UAV_FLAG_COUNTER\", ]) D3D11_BUFFER_UAV = Struct(\"D3D11_BUFFER_UAV\", [", "\"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\", \"D3D11_CREATE_DEVICE_BGRA_SUPPORT\", ]) ID3D11Device.methods += [ StdMethod(HRESULT, \"CreateBuffer\", [(Pointer(Const(D3D11_BUFFER_DESC)), \"pDesc\"),", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [
    "D3D11_STANDARD_MULTISAMPLE_PATTERN",
    "D3D11_CENTER_MULTISAMPLE_PATTERN",
])

D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
    "D3D11_DEVICE_CONTEXT_IMMEDIATE",
    "D3D11_DEVICE_CONTEXT_DEFERRED",
])

D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
    (UINT, "InstanceId"),
    (UINT, "InstanceIndex"),
    (UINT, "TypeId"),
    (UINT, "ConstantBuffer"),
    (UINT, "BaseConstantBufferOffset"),
    (UINT, "BaseTexture"),
    (UINT, "BaseSampler"),
    (BOOL, "Created"),
])

ID3D11ClassInstance.methods += [
    StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]),
    StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]),
    StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]),
]

ID3D11ClassLinkage.methods += [
    StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
    StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]

ID3D11CommandList.methods += [
    StdMethod(UINT, "GetContextFlags", []),
]

D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
    (BOOL, "DriverConcurrentCreates"),
    (BOOL, "DriverCommandLists"),
])

D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
    (BOOL, "DoublePrecisionFloatShaderOps"),
])

D3D11_FORMAT_SUPPORT = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_SO_BUFFER",
    "D3D11_FORMAT_SUPPORT_TEXTURE1D",
    "D3D11_FORMAT_SUPPORT_TEXTURE2D",
    "D3D11_FORMAT_SUPPORT_TEXTURE3D",
    "D3D11_FORMAT_SUPPORT_TEXTURECUBE",
    "D3D11_FORMAT_SUPPORT_SHADER_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
    "D3D11_FORMAT_SUPPORT_MIP",
    "D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
    "D3D11_FORMAT_SUPPORT_RENDER_TARGET",
    "D3D11_FORMAT_SUPPORT_BLENDABLE",
    "D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
    "D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
    "D3D11_FORMAT_SUPPORT_DISPLAY",
    "D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER",
    "D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
    "D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])

D3D11_FORMAT_SUPPORT2 = Enum("D3D11_FORMAT_SUPPORT2", [
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])

D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])

D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])

D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
    (BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])

D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
    ("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
    ("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
    ("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
    ("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
    ("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)
Interface(\"ID3D11RenderTargetView\", ID3D11View) ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\", ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\",", "whom the Software is # furnished to do so, subject", "[ (BOOL, \"DoublePrecisionFloatShaderOps\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [ (DXGI_FORMAT, \"InFormat\"),", "]) D3D11_STENCIL_OP = Enum(\"D3D11_STENCIL_OP\", [ \"D3D11_STENCIL_OP_KEEP\", \"D3D11_STENCIL_OP_ZERO\", \"D3D11_STENCIL_OP_REPLACE\", \"D3D11_STENCIL_OP_INCR_SAT\", \"D3D11_STENCIL_OP_DECR_SAT\",", "\"ppConstantBuffers\")]), StdMethod(Void, \"ClearState\", []), StdMethod(Void, \"Flush\", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE, \"GetType\", []),", "\"DepthFunc\"), (BOOL, \"StencilEnable\"), (UINT8, \"StencilReadMask\"), (UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"), (D3D11_DEPTH_STENCILOP_DESC,", "ID3D11Resource) ID3D11View = Interface(\"ID3D11View\", ID3D11DeviceChild) ID3D11ShaderResourceView = Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView", "\"D3D11_FILTER_TYPE_POINT\", \"D3D11_FILTER_TYPE_LINEAR\", ]) D3D11_TEXTURE_ADDRESS_MODE = Enum(\"D3D11_TEXTURE_ADDRESS_MODE\", [ \"D3D11_TEXTURE_ADDRESS_WRAP\", \"D3D11_TEXTURE_ADDRESS_MIRROR\", \"D3D11_TEXTURE_ADDRESS_CLAMP\",", "\"pIndexBuffer\"), Out(Pointer(DXGI_FORMAT), \"Format\"), Out(Pointer(UINT), \"Offset\")]), StdMethod(Void, \"GSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT,", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT, \"StartSlot\"),", "do so, subject to the following conditions: # # The", "(UINT, \"InputSlot\"), (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, \"AlignedByteOffset\"), (D3D11_INPUT_CLASSIFICATION, \"InputSlotClass\"), (UINT, \"InstanceDataStepRate\"), ]) D3D11_FILL_MODE", "ID3D11Counter.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_COUNTER_DESC), \"pDesc\")]), ] D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS =", "\"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_SRV = Struct(\"D3D11_TEXCUBE_SRV\", [ (UINT, \"MostDetailedMip\"),", "StdFunction(SIZE_T, \"D3D11CoreGetLayeredDeviceSize\", [LPCVOID, DWORD], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateLayeredDevice\", [LPCVOID, DWORD, LPCVOID,", "\"Texture2DMSArray\"), (D3D11_TEX3D_RTV, \"Texture3D\"), ]), None), ]) ID3D11RenderTargetView.methods += [ StdMethod(Void,", "Out(Pointer(ObjPointer(Void)), \"ppResource\")]), StdMethod(HRESULT, \"CheckFormatSupport\", [(DXGI_FORMAT, \"Format\"), Out(Pointer(D3D11_FORMAT_SUPPORT), \"pFormatSupport\")]), StdMethod(HRESULT, \"CheckMultisampleQualityLevels\",", "\"Height\"), (UINT, \"Depth\"), (UINT, \"MipLevels\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG,", "\"D3D11_RESOURCE_MISC_GDI_COMPATIBLE\", ]) D3D11_MAP = Enum(\"D3D11_MAP\", [ \"D3D11_MAP_READ\", \"D3D11_MAP_WRITE\", \"D3D11_MAP_READ_WRITE\", \"D3D11_MAP_WRITE_DISCARD\",", "# furnished to do so, subject to the following conditions:", "Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [ \"D3D11_CREATE_DEVICE_SINGLETHREADED\", \"D3D11_CREATE_DEVICE_DEBUG\", \"D3D11_CREATE_DEVICE_SWITCH_TO_REF\",", "[Out(Pointer(ObjPointer(ID3D11Resource)), \"ppResource\")]), ] D3D11_BUFFER_SRV = Struct(\"D3D11_BUFFER_SRV\", [ (Union(None, [(UINT, \"FirstElement\"),", 
"StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), \"pDesc\")]), ] D3D11_BLEND = Enum(\"D3D11_BLEND\", [ \"D3D11_BLEND_ZERO\",", "\"pDesc\")]), ] D3D11_BUFFER_RTV = Struct(\"D3D11_BUFFER_RTV\", [ (Union(None, [(UINT, \"FirstElement\"), (UINT,", "ID3D11DeviceContext.methods += [ StdMethod(Void, \"VSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)),", "StdMethod(HRESULT, \"CreateComputeShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11ComputeShader)),", "# # Copyright 2012 <NAME> # All Rights Reserved. #", "shall be included in # all copies or substantial portions", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "\"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"),", "<NAME> # All Rights Reserved. # # Permission is hereby", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "D3D11_CPU_ACCESS_FLAG = Flags(UINT, [ \"D3D11_CPU_ACCESS_WRITE\", \"D3D11_CPU_ACCESS_READ\", ]) D3D11_RESOURCE_MISC_FLAG = Flags(UINT,", "ID3D11Texture2D = Interface(\"ID3D11Texture2D\", ID3D11Resource) ID3D11Texture3D = Interface(\"ID3D11Texture3D\", ID3D11Resource) ID3D11View =", "\"PSGetShader\", [Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetSamplers\",", "(UINT8, \"RenderTargetWriteMask\"), ]) D3D11_BLEND_DESC = Struct(\"D3D11_BLEND_DESC\", [ (BOOL, \"AlphaToCoverageEnable\"), (BOOL,", "StdMethod(UINT, \"GetContextFlags\", []), StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ]", "[(ObjPointer(ID3D11InputLayout), \"pInputLayout\")]), StdMethod(Void, \"IASetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"),", "StdMethod(D3D11_DEVICE_CONTEXT_TYPE, \"GetType\", []), StdMethod(UINT, \"GetContextFlags\", []), StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"),", "(UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"CSSetUnorderedAccessViews\", [(UINT, \"StartSlot\"), (UINT,", "DWORD], internal=True), StdFunction(SIZE_T, \"D3D11CoreGetLayeredDeviceSize\", [LPCVOID, DWORD], internal=True), StdFunction(HRESULT, \"D3D11CoreCreateLayeredDevice\", [LPCVOID,", "Out(OpaquePointer(Void), \"pData\")]), StdMethod(HRESULT, \"SetPrivateData\", [(REFGUID, \"guid\"), (UINT, \"DataSize\"), (OpaqueBlob(Const(Void), \"DataSize\"),", "\"MipLevels\"), ]) D3D11_TEX2D_ARRAY_SRV = Struct(\"D3D11_TEX2D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"),", "\"SetExceptionMode\", [(D3D11_RAISE_FLAG, \"RaiseFlags\")]), StdMethod(UINT, \"GetExceptionMode\", []), ] d3d11 = API(\"d3d11\")", "D3D11_CREATE_DEVICE_DEBUG is set StdFunction(HRESULT, \"D3D11CoreRegisterLayers\", [LPCVOID, DWORD], internal=True), StdFunction(SIZE_T, \"D3D11CoreGetLayeredDeviceSize\",", "d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set StdFunction(HRESULT, \"D3D11CoreRegisterLayers\", [LPCVOID, DWORD], internal=True),", 
"\"BaseVertexLocation\")]), StdMethod(Void, \"Draw\", [(UINT, \"VertexCount\"), (UINT, \"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\", [(ObjPointer(ID3D11Resource),", "Interface(\"ID3D11HullShader\", ID3D11DeviceChild) ID3D11DomainShader = Interface(\"ID3D11DomainShader\", ID3D11DeviceChild) ID3D11GeometryShader = Interface(\"ID3D11GeometryShader\", ID3D11DeviceChild)", "(D3D11_COUNTER, \"LastDeviceDependentCounter\"), (UINT, \"NumSimultaneousCounters\"), (UINT8, \"NumDetectableParallelUnits\"), ]) ID3D11Counter.methods += [", "\"MipLevels\"), ]) D3D11_TEXCUBE_ARRAY_SRV = Struct(\"D3D11_TEXCUBE_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"),", "(Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView),", "\"ppUnorderedAccessViews\")]), StdMethod(Void, \"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]),", "[(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)), \"ppQuery\")]), StdMethod(HRESULT, \"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"), Out(Pointer(ObjPointer(ID3D11Predicate)), \"ppPredicate\")]),", "StdMethod(Void, \"CSGetShader\", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void,", "(UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppVertexBuffers\"), (Pointer(Const(UINT)), \"pStrides\"), (Pointer(Const(UINT)), \"pOffsets\")]), StdMethod(Void,", "]) D3D11_DEPTH_WRITE_MASK = Enum(\"D3D11_DEPTH_WRITE_MASK\", [ \"D3D11_DEPTH_WRITE_MASK_ZERO\", \"D3D11_DEPTH_WRITE_MASK_ALL\", ]) D3D11_STENCIL_OP =", "(UINT, \"right\"), (UINT, \"bottom\"), (UINT, \"back\"), ]) ID3D11DeviceChild.methods += [", "\"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2D\", \"D3D11_SRV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2DMS\", \"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ])", "(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView),", "[ (D3D11_FILTER, \"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"), (FLOAT,", "(UINT, \"front\"), (UINT, \"right\"), (UINT, \"bottom\"), (UINT, \"back\"), ]) ID3D11DeviceChild.methods", "[ (UINT, \"UnusedField_NothingToDefine\"), ]) D3D11_TEX2DMS_ARRAY_SRV = Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"), \"ppUnorderedAccessViews\"), (Pointer(Const(UINT)), \"pUAVInitialCounts\")]), StdMethod(Void,", "\"IAGetIndexBuffer\", [Out(Pointer(ObjPointer(ID3D11Buffer)), \"pIndexBuffer\"), Out(Pointer(DXGI_FORMAT), \"Format\"), Out(Pointer(UINT), \"Offset\")]), StdMethod(Void, 
\"GSGetConstantBuffers\", [(UINT,", "StdMethod(HRESULT, \"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture1D)), \"ppTexture1D\")]), StdMethod(HRESULT, \"CreateTexture2D\",", "[ (BOOL, \"DriverConcurrentCreates\"), (BOOL, \"DriverCommandLists\"), ]) D3D11_FEATURE_DATA_DOUBLES = Struct(\"D3D11_FEATURE_DATA_DOUBLES\", [", "\"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11PixelShader)), \"ppPixelShader\")]), StdMethod(HRESULT, \"CreateHullShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"),", "\"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR\", \"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT\", \"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR\", \"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT\", \"D3D11_FILTER_MIN_MAG_MIP_LINEAR\", \"D3D11_FILTER_ANISOTROPIC\", \"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT\", \"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR\",", "] d3d11 = API(\"d3d11\") d3d11.addFunctions([ StdFunction(HRESULT, \"D3D11CreateDevice\", [(ObjPointer(IDXGIAdapter), \"pAdapter\"), (D3D_DRIVER_TYPE,", "]) ID3D11RasterizerState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RASTERIZER_DESC), \"pDesc\")]), ] D3D11_SUBRESOURCE_DATA", "(UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), \"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"),", "(BOOL, \"StencilEnable\"), (UINT8, \"StencilReadMask\"), (UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"), (D3D11_DEPTH_STENCILOP_DESC, \"BackFace\"),", "DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True), ])", "StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY, \"Topology\")]), StdMethod(Void, \"VSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"),", "StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), \"ppUAView\")]), StdMethod(HRESULT, \"CreateRenderTargetView\",", "\"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)), \"ppQuery\")]), StdMethod(HRESULT, \"CreatePredicate\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pPredicateDesc\"), Out(Pointer(ObjPointer(ID3D11Predicate)),", "[(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSSetShader\", [(ObjPointer(ID3D11VertexShader),", "D3D11_RAISE_FLAG = Flags(UINT, [ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG = Flags(UINT, [", "\"ThreadGroupCountY\"), (UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void,", "ID3D11View) ID3D11UnorderedAccessView = Interface(\"ID3D11UnorderedAccessView\", ID3D11View) ID3D11VertexShader = Interface(\"ID3D11VertexShader\", ID3D11DeviceChild) ID3D11HullShader", "(D3D11_TEX2DMS_SRV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_SRV, \"Texture2DMSArray\"), (D3D11_TEX3D_SRV, \"Texture3D\"), (D3D11_TEXCUBE_SRV, \"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV, 
\"TextureCubeArray\"),", "Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]), StdMethod(HRESULT, \"CreateDomainShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage),", "StdMethod(Void, \"ClearRenderTargetView\", [(ObjPointer(ID3D11RenderTargetView), \"pRenderTargetView\"), (Array(Const(FLOAT), 4), \"ColorRGBA\")]), StdMethod(Void, \"ClearUnorderedAccessViewUint\", [(ObjPointer(ID3D11UnorderedAccessView),", "\"SrcRowPitch\"), (UINT, \"SrcDepthPitch\")]), StdMethod(Void, \"CopyStructureCount\", [(ObjPointer(ID3D11Buffer), \"pDstBuffer\"), (UINT, \"DstAlignedByteOffset\"), (ObjPointer(ID3D11UnorderedAccessView),", "\"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"),", "\"StencilFailOp\"), (D3D11_STENCIL_OP, \"StencilDepthFailOp\"), (D3D11_STENCIL_OP, \"StencilPassOp\"), (D3D11_COMPARISON_FUNC, \"StencilFunc\"), ]) D3D11_DEPTH_STENCIL_DESC =", "\"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH\",", "(Array(FLOAT, 4), \"BorderColor\"), (FLOAT, \"MinLOD\"), (FLOAT, \"MaxLOD\"), ]) ID3D11SamplerState.methods +=", "= Interface(\"ID3D11ShaderResourceView\", ID3D11View) ID3D11RenderTargetView = Interface(\"ID3D11RenderTargetView\", ID3D11View) ID3D11DepthStencilView = Interface(\"ID3D11DepthStencilView\",", "Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\", [(HANDLE, \"hResource\"), (REFIID, \"ReturnedInterface\"), Out(Pointer(ObjPointer(Void)), \"ppResource\")]),", "(FLOAT, \"MaxDepth\"), ]) D3D11_RESOURCE_DIMENSION = Enum(\"D3D11_RESOURCE_DIMENSION\", [ \"D3D11_RESOURCE_DIMENSION_UNKNOWN\", \"D3D11_RESOURCE_DIMENSION_BUFFER\", \"D3D11_RESOURCE_DIMENSION_TEXTURE1D\",", "D3D11_RENDER_TARGET_VIEW_DESC = Struct(\"D3D11_RENDER_TARGET_VIEW_DESC\", [ (DXGI_FORMAT, \"Format\"), (D3D11_RTV_DIMENSION, \"ViewDimension\"), (Union(None, [", "\"PSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"PSSetShader\",", "\"GetData\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\"), Out(OpaqueBlob(Void, \"DataSize\"), \"pData\"), (UINT, \"DataSize\"), (D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]),", "(UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"OMSetRenderTargets\", [(UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11RenderTargetView)),", "(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppVertexBuffers\"), Out(Pointer(UINT), \"pStrides\"), Out(Pointer(UINT), \"pOffsets\")]), StdMethod(Void,", "\"SlopeScaledDepthBias\"), (BOOL, \"DepthClipEnable\"), (BOOL, \"ScissorEnable\"), (BOOL, \"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ])", "\"CreateDepthStencilState\", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), \"pDepthStencilDesc\"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), 
\"ppDepthStencilState\")]), StdMethod(HRESULT, \"CreateRasterizerState\", [(Pointer(Const(D3D11_RASTERIZER_DESC)), \"pRasterizerDesc\"), Out(Pointer(ObjPointer(ID3D11RasterizerState)),", "(Array(Const(FLOAT), 4), \"Values\")]), StdMethod(Void, \"ClearDepthStencilView\", [(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (D3D11_CLEAR_FLAG, \"ClearFlags\"), (FLOAT,", "(UINT, \"MipLevels\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"),", "\"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"), (UINT, \"StartInstanceLocation\")]), StdMethod(Void,", "Out(Pointer(ObjPointer(ID3D11Buffer)), \"ppBuffer\")]), StdMethod(HRESULT, \"CreateTexture1D\", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Texture1D)), \"ppTexture1D\")]),", "[Out(Pointer(D3D11_BLEND_DESC), \"pDesc\")]), ] D3D11_RASTERIZER_DESC = Struct(\"D3D11_RASTERIZER_DESC\", [ (D3D11_FILL_MODE, \"FillMode\"), (D3D11_CULL_MODE,", "(Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), \"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)),", "= Struct(\"D3D11_TEX1D_ARRAY_RTV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "\"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"GSGetSamplers\", [(UINT, \"StartSlot\"),", "ID3D11DepthStencilView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_UAV_FLAG =", "StdMethod(HRESULT, \"CreateBuffer\", [(Pointer(Const(D3D11_BUFFER_DESC)), \"pDesc\"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), \"pInitialData\"), Out(Pointer(ObjPointer(ID3D11Buffer)), \"ppBuffer\")]), StdMethod(HRESULT, \"CreateTexture1D\",", "\"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSGetShader\", [Out(Pointer(ObjPointer(ID3D11DomainShader)), \"ppDomainShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"),", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_UAV_FLAG = Flags(UINT, [", "\"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST\",", "\"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z\", ]) ID3D11View.methods += [ StdMethod(Void, \"GetResource\", [Out(Pointer(ObjPointer(ID3D11Resource)), \"ppResource\")]), ]", "\"IAGetPrimitiveTopology\", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), \"pTopology\")]), StdMethod(Void, \"VSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView),", "[ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_UAV =", "# 
XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is", "\"D3D11_STENCIL_OP_REPLACE\", \"D3D11_STENCIL_OP_INCR_SAT\", \"D3D11_STENCIL_OP_DECR_SAT\", \"D3D11_STENCIL_OP_INVERT\", \"D3D11_STENCIL_OP_INCR\", \"D3D11_STENCIL_OP_DECR\", ]) D3D11_DEPTH_STENCILOP_DESC = Struct(\"D3D11_DEPTH_STENCILOP_DESC\",", "\"End\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\")]), StdMethod(HRESULT, \"GetData\", [(ObjPointer(ID3D11Asynchronous), \"pAsync\"), Out(OpaqueBlob(Void, \"DataSize\"), \"pData\"),", "Struct(\"D3D11_SAMPLER_DESC\", [ (D3D11_FILTER, \"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"),", "\"D3D11_BLEND_INV_SRC_ALPHA\", \"D3D11_BLEND_DEST_ALPHA\", \"D3D11_BLEND_INV_DEST_ALPHA\", \"D3D11_BLEND_DEST_COLOR\", \"D3D11_BLEND_INV_DEST_COLOR\", \"D3D11_BLEND_SRC_ALPHA_SAT\", \"D3D11_BLEND_BLEND_FACTOR\", \"D3D11_BLEND_INV_BLEND_FACTOR\", \"D3D11_BLEND_SRC1_COLOR\", \"D3D11_BLEND_INV_SRC1_COLOR\",", "\"GetDevice\", [Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\")]), StdMethod(HRESULT, \"GetPrivateData\", [(REFGUID, \"guid\"), Out(Pointer(UINT), \"pDataSize\"), Out(OpaquePointer(Void),", "\"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\",", "(UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_ARRAY_SRV = Struct(\"D3D11_TEXCUBE_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT,", "= Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT, \"OutFormatSupport\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT2 =", "\"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "\"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE3D\", ]) D3D11_UAV_DIMENSION = Enum(\"D3D11_UAV_DIMENSION\", [", "(UINT8, \"StencilWriteMask\"), (D3D11_DEPTH_STENCILOP_DESC, \"FrontFace\"), (D3D11_DEPTH_STENCILOP_DESC, \"BackFace\"), ]) ID3D11DepthStencilState.methods += [", "(D3D11_COMPARISON_FUNC, \"ComparisonFunc\"), (Array(FLOAT, 4), \"BorderColor\"), (FLOAT, \"MinLOD\"), (FLOAT, \"MaxLOD\"), ])", "[ \"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\", ]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [ \"D3D11_APPEND_ALIGNED_ELEMENT\", ])", "\"NumClassInstances\")]), StdMethod(Void, \"HSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]),", "\"ppConstantBuffers\")]), StdMethod(Void, \"CSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]),", "\"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST\", 
\"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST\", ])", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "\"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"VSSetShader\", [(ObjPointer(ID3D11VertexShader), \"pVertexShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"),", "\"CreateGeometryShaderWithStreamOutput\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"), (UINT,", "d3dcommon import * from d3d11sdklayers import * HRESULT = MAKE_HRESULT([", "\"ppSamplers\")]), StdMethod(Void, \"DSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]),", "\"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST\",", "\"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"First2DArrayFace\"), (UINT, \"NumCubes\"), ]) D3D11_TEX2DMS_SRV =", "the Software is # furnished to do so, subject to", "\"pNumViewports\"), Out(Array(D3D11_VIEWPORT, \"*pNumViewports\"), \"pViewports\")]), StdMethod(Void, \"RSGetScissorRects\", [Out(Pointer(UINT), \"pNumRects\"), Out(Array(D3D11_RECT, \"*pNumRects\"),", "(UINT, \"Stream\"), (LPCSTR, \"SemanticName\"), (UINT, \"SemanticIndex\"), (BYTE, \"StartComponent\"), (BYTE, \"ComponentCount\"),", "\"pAdapter\"), (D3D_DRIVER_TYPE, \"DriverType\"), (HMODULE, \"Software\"), (D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"),", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "\"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppVertexBuffers\"),", "]) D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct(\"D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS\", [ (BOOL, \"ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x\"), ]) D3D11_FEATURE, D3D11_FEATURE_DATA", "= Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD\",", "limitation the rights # to use, copy, modify, merge, publish,", "\"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\", [(HANDLE, \"hResource\"), (REFIID, \"ReturnedInterface\"), 
Out(Pointer(ObjPointer(Void)), \"ppResource\")]), StdMethod(HRESULT,", "\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), \"ppComputeShader\")]), StdMethod(HRESULT, \"CreateClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)),", "\"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\", [ (UINT, \"MipSlice\"),", "[(ObjPointer(ID3D11ComputeShader), \"pComputeShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"CSSetSamplers\", [(UINT,", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "(D3D11_CREATE_DEVICE_FLAG, \"Flags\"), (Array(Const(D3D_FEATURE_LEVEL), \"FeatureLevels\"), \"pFeatureLevels\"), (UINT, \"FeatureLevels\"), (UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)),", "= Enum(\"D3D11_BLEND_OP\", [ \"D3D11_BLEND_OP_ADD\", \"D3D11_BLEND_OP_SUBTRACT\", \"D3D11_BLEND_OP_REV_SUBTRACT\", \"D3D11_BLEND_OP_MIN\", \"D3D11_BLEND_OP_MAX\", ]) D3D11_COLOR_WRITE_ENABLE", "\"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"ClearState\", []), StdMethod(Void, \"Flush\", []), StdMethod(D3D11_DEVICE_CONTEXT_TYPE, \"GetType\",", "\"D3D11_BUFFEREX_SRV_FLAG_RAW\", ]) D3D11_BUFFEREX_SRV = Struct(\"D3D11_BUFFEREX_SRV\", [ (UINT, \"FirstElement\"), (UINT, \"NumElements\"),", "[(UINT, \"IndexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartIndexLocation\"), (INT, \"BaseVertexLocation\"), (UINT, \"StartInstanceLocation\")]),", "[ (D3D11_BUFFER_UAV, \"Buffer\"), (D3D11_TEX1D_UAV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_UAV, \"Texture1DArray\"), (D3D11_TEX2D_UAV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_UAV,", "\"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD\", \"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE\", ]) ID3D11Asynchronous.methods += [ StdMethod(UINT, \"GetDataSize\",", "\"D3D11_COUNTER_TYPE_UINT32\", \"D3D11_COUNTER_TYPE_UINT64\", ]) D3D11_COUNTER_DESC = Struct(\"D3D11_COUNTER_DESC\", [ (D3D11_COUNTER, \"Counter\"), (UINT,", "StdMethod(Void, \"RSGetScissorRects\", [Out(Pointer(UINT), \"pNumRects\"), Out(Array(D3D11_RECT, \"*pNumRects\"), \"pRects\")]), StdMethod(Void, \"HSGetShaderResources\", [(UINT,", "= Struct(\"D3D11_TEX3D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_SRV =", "##########################################################################/ from dxgi import * from d3dcommon import * from", "(Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), \"ppRTView\")]), StdMethod(HRESULT, \"CreateDepthStencilView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), \"pDesc\"),", "D3D11_DSV_FLAG = Flags(UINT, [ \"D3D11_DSV_READ_ONLY_DEPTH\", \"D3D11_DSV_READ_ONLY_STENCIL\", ]) D3D11_DEPTH_STENCIL_VIEW_DESC = Struct(\"D3D11_DEPTH_STENCIL_VIEW_DESC\",", "StdMethod(Void, \"HSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void,", "\"ppConstantBuffers\")]), StdMethod(Void, \"VSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]),", "[(ObjPointer(ID3D11Resource), 
\"pDstResource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\")]), StdMethod(Void, \"UpdateSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"),", "# ##########################################################################/ from dxgi import * from d3dcommon import *", "]) D3D11_TEX1D_SRV = Struct(\"D3D11_TEX1D_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), ])", "(D3D11_FILTER, \"Filter\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressU\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressV\"), (D3D11_TEXTURE_ADDRESS_MODE, \"AddressW\"), (FLOAT, \"MipLODBias\"),", "= Flags(UINT, [ \"D3D11_CLEAR_DEPTH\", \"D3D11_CLEAR_STENCIL\", ]) D3D11_RECT = Alias(\"D3D11_RECT\", RECT)", "documentation files (the \"Software\"), to deal # in the Software", "\"D3D11_USAGE_DYNAMIC\", \"D3D11_USAGE_STAGING\", ]) D3D11_BIND_FLAG = Flags(UINT, [ \"D3D11_BIND_VERTEX_BUFFER\", \"D3D11_BIND_INDEX_BUFFER\", \"D3D11_BIND_CONSTANT_BUFFER\",", "\"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]), StdMethod(HRESULT, \"CreateGeometryShaderWithStreamOutput\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"),", "\"ArraySize\"), (DXGI_FORMAT, \"Format\"), (DXGI_SAMPLE_DESC, \"SampleDesc\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG,", "(Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"DSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer),", "copies or substantial portions of the Software. # # THE", "(UINT, \"top\"), (UINT, \"front\"), (UINT, \"right\"), (UINT, \"bottom\"), (UINT, \"back\"),", "\"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"), (D3D11_RESOURCE_MISC_FLAG, \"MiscFlags\"), ]) ID3D11Texture2D.methods +=", "Out(Pointer(ObjPointer(ID3D11ClassInstance)), \"ppInstance\")]), ] ID3D11CommandList.methods += [ StdMethod(UINT, \"GetContextFlags\", []), ]", "\"ClearFlags\"), (FLOAT, \"Depth\"), (UINT8, \"Stencil\")]), StdMethod(Void, \"GenerateMips\", [(ObjPointer(ID3D11ShaderResourceView), \"pShaderResourceView\")]), StdMethod(Void,", "\"Texture2DMSArray\"), (D3D11_TEX3D_SRV, \"Texture3D\"), (D3D11_TEXCUBE_SRV, \"TextureCube\"), (D3D11_TEXCUBE_ARRAY_SRV, \"TextureCubeArray\"), (D3D11_BUFFEREX_SRV, \"BufferEx\"), ]),", "D3D11_TEX1D_ARRAY_SRV = Struct(\"D3D11_TEX1D_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"FirstArraySlice\"),", "\"ppSRView\")]), StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), \"ppUAView\")]), StdMethod(HRESULT,", "\"pBlendState\"), (Array(Const(FLOAT), 4), \"BlendFactor\"), (UINT, \"SampleMask\")]), StdMethod(Void, \"OMSetDepthStencilState\", [(ObjPointer(ID3D11DepthStencilState), \"pDepthStencilState\"),", "\"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), \"NumEntries\"), \"pSODeclaration\"), (UINT, \"NumEntries\"), (Array(Const(UINT), \"NumStrides\"),", "\"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void,", "\"ArraySize\"), ]) D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\", [ (UINT, \"MipSlice\"), ]) D3D11_TEX2DMS_RTV", 
"\"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\", [(UINT, \"StartSlot\"), (UINT,", "D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [ \"D3D11_RESOURCE_MISC_GENERATE_MIPS\", \"D3D11_RESOURCE_MISC_SHARED\", \"D3D11_RESOURCE_MISC_TEXTURECUBE\", \"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS\", \"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS\", \"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED\",", "(BOOL, \"AlphaToCoverageEnable\"), (BOOL, \"IndependentBlendEnable\"), (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), \"RenderTarget\"), ]) ID3D11BlendState.methods +=", "D3D11_DEPTH_STENCIL_DESC = Struct(\"D3D11_DEPTH_STENCIL_DESC\", [ (BOOL, \"DepthEnable\"), (D3D11_DEPTH_WRITE_MASK, \"DepthWriteMask\"), (D3D11_COMPARISON_FUNC, \"DepthFunc\"),", "(FLOAT, \"Depth\"), (UINT8, \"Stencil\")]), StdMethod(Void, \"GenerateMips\", [(ObjPointer(ID3D11ShaderResourceView), \"pShaderResourceView\")]), StdMethod(Void, \"SetResourceMinLOD\",", "\"StencilPassOp\"), (D3D11_COMPARISON_FUNC, \"StencilFunc\"), ]) D3D11_DEPTH_STENCIL_DESC = Struct(\"D3D11_DEPTH_STENCIL_DESC\", [ (BOOL, \"DepthEnable\"),", "(UINT, \"MipLevels\"), ]) D3D11_TEXCUBE_SRV = Struct(\"D3D11_TEXCUBE_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT,", "(UINT, \"StartInstanceLocation\")]), StdMethod(Void, \"GSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"),", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"),", "\"D3D11_SRV_DIMENSION_TEXTURE2DMS\", \"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE3D\", \"D3D11_SRV_DIMENSION_TEXTURECUBE\", \"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY\", \"D3D11_SRV_DIMENSION_BUFFEREX\", ]) D3D11_DSV_DIMENSION = Enum(\"D3D11_DSV_DIMENSION\",", "Enum(\"D3D11_SRV_DIMENSION\", [ \"D3D11_SRV_DIMENSION_UNKNOWN\", \"D3D11_SRV_DIMENSION_BUFFER\", \"D3D11_SRV_DIMENSION_TEXTURE1D\", \"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2D\", \"D3D11_SRV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2DMS\", \"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY\",", "(ObjPointer(ID3D11Resource), \"pSrcResource\")]), StdMethod(Void, \"UpdateSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"), (UINT, \"DstSubresource\"), (Pointer(Const(D3D11_BOX)), \"pDstBox\"),", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "\"CreateGeometryShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), \"ppGeometryShader\")]),", "(UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\",", "[(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSSetShader\", [(ObjPointer(ID3D11DomainShader),", "\"D3D11_PRIMITIVE_TRIANGLE\", \"D3D11_PRIMITIVE_LINE_ADJ\", \"D3D11_PRIMITIVE_TRIANGLE_ADJ\", \"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH\",", 
"\"CreateClassLinkage\", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), \"ppLinkage\")]), StdMethod(HRESULT, \"CreateBlendState\", [(Pointer(Const(D3D11_BLEND_DESC)), \"pBlendStateDesc\"), Out(Pointer(ObjPointer(ID3D11BlendState)), \"ppBlendState\")]), StdMethod(HRESULT,", "\"DstAlignedByteOffset\"), (ObjPointer(ID3D11UnorderedAccessView), \"pSrcView\")]), StdMethod(Void, \"ClearRenderTargetView\", [(ObjPointer(ID3D11RenderTargetView), \"pRenderTargetView\"), (Array(Const(FLOAT), 4), \"ColorRGBA\")]),", "(Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)),", "(UINT, \"ArraySize\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"), (D3D11_BIND_FLAG, \"BindFlags\"), (D3D11_CPU_ACCESS_FLAG, \"CPUAccessFlags\"),", "\"GetDesc\", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), \"pDesc\")]), ] D3D11_BUFFER_UAV_FLAG = Flags(UINT, [ \"D3D11_BUFFER_UAV_FLAG_RAW\", \"D3D11_BUFFER_UAV_FLAG_APPEND\",", "\"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\", \"D3D11_QUERY_TIMESTAMP_DISJOINT\", \"D3D11_QUERY_PIPELINE_STATISTICS\", \"D3D11_QUERY_OCCLUSION_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE\", \"D3D11_QUERY_SO_STATISTICS_STREAM0\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0\", \"D3D11_QUERY_SO_STATISTICS_STREAM1\",", "\"RenderTargetWriteMask\"), ]) D3D11_BLEND_DESC = Struct(\"D3D11_BLEND_DESC\", [ (BOOL, \"AlphaToCoverageEnable\"), (BOOL, \"IndependentBlendEnable\"),", "\"CreateHullShader\", [(Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecode\"), (SIZE_T, \"BytecodeLength\"), (ObjPointer(ID3D11ClassLinkage), \"pClassLinkage\"), Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\")]),", "IUnknown) D3D11_INPUT_CLASSIFICATION = Enum(\"D3D11_INPUT_CLASSIFICATION\", [ \"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\", ]) D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET =", "\"SOGetTargets\", [(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppSOTargets\")]), StdMethod(Void, \"RSGetState\", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]),", "= Interface(\"ID3D11ComputeShader\", ID3D11DeviceChild) ID3D11InputLayout = Interface(\"ID3D11InputLayout\", ID3D11DeviceChild) ID3D11SamplerState = Interface(\"ID3D11SamplerState\",", "(Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), \"ppSRView\")]), StdMethod(HRESULT, \"CreateUnorderedAccessView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), \"pDesc\"),", "\"ScissorEnable\"), (BOOL, \"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods += [ StdMethod(Void,", "]) D3D11_RAISE_FLAG = Flags(UINT, [ \"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR\", ]) D3D11_CLEAR_FLAG = Flags(UINT,", "(D3D11_FORMAT_SUPPORT, \"OutFormatSupport\"), ]) D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct(\"D3D11_FEATURE_DATA_FORMAT_SUPPORT2\", [ (DXGI_FORMAT, \"InFormat\"), (D3D11_FORMAT_SUPPORT2,", "\"D3D11_COLOR_WRITE_ENABLE_RED\", \"D3D11_COLOR_WRITE_ENABLE_GREEN\", \"D3D11_COLOR_WRITE_ENABLE_BLUE\", \"D3D11_COLOR_WRITE_ENABLE_ALPHA\", ]) D3D11_RENDER_TARGET_BLEND_DESC = Struct(\"D3D11_RENDER_TARGET_BLEND_DESC\", [ (BOOL,", "= Enum(\"D3D11_CULL_MODE\", [ \"D3D11_CULL_NONE\", \"D3D11_CULL_FRONT\", \"D3D11_CULL_BACK\", ]) D3D11_SO_DECLARATION_ENTRY = 
Struct(\"D3D11_SO_DECLARATION_ENTRY\",", "StdMethod(Void, \"GSSetShader\", [(ObjPointer(ID3D11GeometryShader), \"pShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void,", "(D3D11_RTV_DIMENSION, \"ViewDimension\"), (Union(None, [ (D3D11_BUFFER_RTV, \"Buffer\"), (D3D11_TEX1D_RTV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_RTV, \"Texture1DArray\"),", "(D3D11_BLEND_OP, \"BlendOpAlpha\"), (UINT8, \"RenderTargetWriteMask\"), ]) D3D11_BLEND_DESC = Struct(\"D3D11_BLEND_DESC\", [ (BOOL,", "+= [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_RASTERIZER_DESC), \"pDesc\")]), ] D3D11_SUBRESOURCE_DATA = Struct(\"D3D11_SUBRESOURCE_DATA\",", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "]) D3D11_CULL_MODE = Enum(\"D3D11_CULL_MODE\", [ \"D3D11_CULL_NONE\", \"D3D11_CULL_FRONT\", \"D3D11_CULL_BACK\", ]) D3D11_SO_DECLARATION_ENTRY", "(D3D11_DSV_FLAG, \"Flags\"), (Union(None, [ (D3D11_TEX1D_DSV, \"Texture1D\"), (D3D11_TEX1D_ARRAY_DSV, \"Texture1DArray\"), (D3D11_TEX2D_DSV, \"Texture2D\"),", "(UINT, \"SrcRowPitch\"), (UINT, \"SrcDepthPitch\")]), StdMethod(Void, \"CopyStructureCount\", [(ObjPointer(ID3D11Buffer), \"pDstBuffer\"), (UINT, \"DstAlignedByteOffset\"),", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_RTV = Struct(\"D3D11_TEX2D_RTV\", [ (UINT,", "Software, and to permit persons to whom the Software is", "(UINT, \"ThreadGroupCountZ\")]), StdMethod(Void, \"DispatchIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"RSSetState\",", "\"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"DSSetShader\", [(ObjPointer(ID3D11DomainShader), \"pDomainShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT,", "(BOOL, \"ScissorEnable\"), (BOOL, \"MultisampleEnable\"), (BOOL, \"AntialiasedLineEnable\"), ]) ID3D11RasterizerState.methods += [", "Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(Void, \"OMGetRenderTargetsAndUnorderedAccessViews\", [(UINT, \"NumRTVs\"), (Array(ObjPointer(ID3D11RenderTargetView), \"NumRTVs\"), \"ppRenderTargetViews\"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)),", "(UINT, \"InstanceIndex\"), (UINT, \"TypeId\"), (UINT, \"ConstantBuffer\"), (UINT, \"BaseConstantBufferOffset\"), (UINT, \"BaseTexture\"),", "(Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"CSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)),", "\"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ\", \"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST\",", "Out(Pointer(ObjPointer(ID3D11Counter)), \"ppCounter\")]), StdMethod(HRESULT, \"CreateDeferredContext\", [(UINT, \"ContextFlags\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppDeferredContext\")]), StdMethod(HRESULT, \"OpenSharedResource\",", "\"D3D11CoreRegisterLayers\", [LPCVOID, DWORD], internal=True), StdFunction(SIZE_T, 
\"D3D11CoreGetLayeredDeviceSize\", [LPCVOID, DWORD], internal=True), StdFunction(HRESULT,", "[ (D3D11_FILL_MODE, \"FillMode\"), (D3D11_CULL_MODE, \"CullMode\"), (BOOL, \"FrontCounterClockwise\"), (INT, \"DepthBias\"), (FLOAT,", "]) ID3D11SamplerState.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_SAMPLER_DESC), \"pDesc\")]), ] D3D11_FORMAT_SUPPORT", "(Array(Const(FLOAT), 4), \"ColorRGBA\")]), StdMethod(Void, \"ClearUnorderedAccessViewUint\", [(ObjPointer(ID3D11UnorderedAccessView), \"pUnorderedAccessView\"), (Array(Const(UINT), 4), \"Values\")]),", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_DSV = Struct(\"D3D11_TEX2D_DSV\", [ (UINT,", "Struct(\"D3D11_QUERY_DATA_SO_STATISTICS\", [ (UINT64, \"NumPrimitivesWritten\"), (UINT64, \"PrimitivesStorageNeeded\"), ]) D3D11_COUNTER = Enum(\"D3D11_COUNTER\",", "\"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN\", \"D3D11_FORMAT_SUPPORT_RENDER_TARGET\", \"D3D11_FORMAT_SUPPORT_BLENDABLE\", \"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL\", \"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE\", \"D3D11_FORMAT_SUPPORT_DISPLAY\", \"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET\", \"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD\",", "\"VSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"VSGetSamplers\",", "DWORD, DWORD, DWORD, DWORD, DWORD], internal=True), ]) d3d11.addInterfaces([ IDXGIAdapter1, IDXGIDevice1,", "(UINT, \"Height\"), (UINT, \"Depth\"), (UINT, \"MipLevels\"), (DXGI_FORMAT, \"Format\"), (D3D11_USAGE, \"Usage\"),", "]) D3D11_DSV_FLAG = Flags(UINT, [ \"D3D11_DSV_READ_ONLY_DEPTH\", \"D3D11_DSV_READ_ONLY_STENCIL\", ]) D3D11_DEPTH_STENCIL_VIEW_DESC =", "\"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"), (SIZE_T, \"BytecodeLength\"), Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(HRESULT, \"CreateVertexShader\", [(Blob(Const(Void), \"BytecodeLength\"),", "\"NumViews\"), (Array(ObjPointer(ID3D11ShaderResourceView), \"NumViews\"), \"ppShaderResourceViews\")]), StdMethod(Void, \"HSGetShader\", [Out(Pointer(ObjPointer(ID3D11HullShader)), \"ppHullShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"),", "(D3D11_TEX1D_ARRAY_RTV, \"Texture1DArray\"), (D3D11_TEX2D_RTV, \"Texture2D\"), (D3D11_TEX2D_ARRAY_RTV, \"Texture2DArray\"), (D3D11_TEX2DMS_RTV, \"Texture2DMS\"), (D3D11_TEX2DMS_ARRAY_RTV, \"Texture2DMSArray\"),", "\"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"DSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)),", "Out(Pointer(ObjPointer(ID3D11DepthStencilView)), \"ppDepthStencilView\")]), StdMethod(HRESULT, \"CreateInputLayout\", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), \"NumElements\"), \"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void),", "\"ArraySize\"), ]) D3D11_TEX2DMS_ARRAY_RTV = Struct(\"D3D11_TEX2DMS_ARRAY_RTV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"),", "\"back\"), ]) ID3D11DeviceChild.methods += [ StdMethod(Void, \"GetDevice\", [Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\")]), StdMethod(HRESULT,", "\"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppSOTargets\")]), StdMethod(Void, \"RSGetState\", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(Void, \"RSGetViewports\",", "\"NumSamplers\"), 
(Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"HSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "\"D3D11_RESOURCE_DIMENSION_TEXTURE1D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE2D\", \"D3D11_RESOURCE_DIMENSION_TEXTURE3D\", ]) D3D11_SRV_DIMENSION = Enum(\"D3D11_SRV_DIMENSION\", [ \"D3D11_SRV_DIMENSION_UNKNOWN\", \"D3D11_SRV_DIMENSION_BUFFER\",", "\"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL), \"pFeatureLevel\"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), \"ppImmediateContext\")]), #", "= Enum(\"D3D11_SRV_DIMENSION\", [ \"D3D11_SRV_DIMENSION_UNKNOWN\", \"D3D11_SRV_DIMENSION_BUFFER\", \"D3D11_SRV_DIMENSION_TEXTURE1D\", \"D3D11_SRV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2D\", \"D3D11_SRV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_SRV_DIMENSION_TEXTURE2DMS\",", "\"D3D11_FORMAT_SUPPORT_SHADER_GATHER\", \"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST\", \"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW\", \"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON\", ]) D3D11_FORMAT_SUPPORT2 = Enum(\"D3D11_FORMAT_SUPPORT2\", [ \"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD\",", "\"FeatureLevels\"), (UINT, \"SDKVersion\"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), \"pSwapChainDesc\"), Out(Pointer(ObjPointer(IDXGISwapChain)), \"ppSwapChain\"), Out(Pointer(ObjPointer(ID3D11Device)), \"ppDevice\"), Out(Pointer(D3D_FEATURE_LEVEL),", "= Struct(\"D3D11_MAPPED_SUBRESOURCE\", [ (OpaquePointer(Void), \"pData\"), (UINT, \"RowPitch\"), (UINT, \"DepthPitch\"), ])", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2D_UAV = Struct(\"D3D11_TEX2D_UAV\", [ (UINT,", "\"NumRTVs\"), \"ppRenderTargetViews\"), (ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), \"NumUAVs\"),", "\"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppVertexBuffers\"), Out(Pointer(UINT), \"pStrides\"), Out(Pointer(UINT), \"pOffsets\")]),", "(FLOAT, \"TopLeftY\"), (FLOAT, \"Width\"), (FLOAT, \"Height\"), (FLOAT, \"MinDepth\"), (FLOAT, \"MaxDepth\"),", "[(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)),", "\"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\", [(UINT, \"StartSlot\"),", "\"D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH\",", "(UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"PSGetShaderResources\", [(UINT, \"StartSlot\"), (UINT,", "[ \"D3D11_ASYNC_GETDATA_DONOTFLUSH\", ]) D3D11_QUERY = Enum(\"D3D11_QUERY\", [ \"D3D11_QUERY_EVENT\", \"D3D11_QUERY_OCCLUSION\", \"D3D11_QUERY_TIMESTAMP\",", "= Enum(\"D3D11_INPUT_CLASSIFICATION\", [ \"D3D11_INPUT_PER_VERTEX_DATA\", \"D3D11_INPUT_PER_INSTANCE_DATA\", ]) 
D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [", "Out(Pointer(ObjPointer(ID3D11Texture3D)), \"ppTexture3D\")]), StdMethod(HRESULT, \"CreateShaderResourceView\", [(ObjPointer(ID3D11Resource), \"pResource\"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), \"pDesc\"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), \"ppSRView\")]),", "ID3D11SamplerState = Interface(\"ID3D11SamplerState\", ID3D11DeviceChild) ID3D11Asynchronous = Interface(\"ID3D11Asynchronous\", ID3D11DeviceChild) ID3D11Query =", "+= [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), \"pDesc\")]), ] D3D11_FILTER = Enum(\"D3D11_FILTER\",", "[ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_QUERY_DESC), \"pDesc\")]), ] D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct(\"D3D11_QUERY_DATA_TIMESTAMP_DISJOINT\", [", "\"MipLevels\"), ]) D3D11_TEXCUBE_SRV = Struct(\"D3D11_TEXCUBE_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"),", "]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), \"pDesc\")]), ] D3D11_FILTER", "Enum(\"D3D11_RTV_DIMENSION\", [ \"D3D11_RTV_DIMENSION_UNKNOWN\", \"D3D11_RTV_DIMENSION_BUFFER\", \"D3D11_RTV_DIMENSION_TEXTURE1D\", \"D3D11_RTV_DIMENSION_TEXTURE1DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2D\", \"D3D11_RTV_DIMENSION_TEXTURE2DARRAY\", \"D3D11_RTV_DIMENSION_TEXTURE2DMS\", \"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY\",", "\"pData\"), (UINT, \"DataSize\"), (D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]), StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate), \"pPredicate\"), (BOOL,", "StdMethod(Void, \"ClearDepthStencilView\", [(ObjPointer(ID3D11DepthStencilView), \"pDepthStencilView\"), (D3D11_CLEAR_FLAG, \"ClearFlags\"), (FLOAT, \"Depth\"), (UINT8, \"Stencil\")]),", "\"Draw\", [(UINT, \"VertexCount\"), (UINT, \"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT,", "(UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"DrawInstancedIndirect\", [(ObjPointer(ID3D11Buffer), \"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"Dispatch\",", "Interface(\"ID3D11ClassLinkage\", ID3D11DeviceChild) ID3D11CommandList = Interface(\"ID3D11CommandList\", ID3D11DeviceChild) ID3D11Device = Interface(\"ID3D11Device\", IUnknown)", "ID3D11Texture1D = Interface(\"ID3D11Texture1D\", ID3D11Resource) ID3D11Texture2D = Interface(\"ID3D11Texture2D\", ID3D11Resource) ID3D11Texture3D =", "Struct(\"D3D11_TEX2DMS_ARRAY_SRV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_SHADER_RESOURCE_VIEW_DESC = Struct(\"D3D11_SHADER_RESOURCE_VIEW_DESC\",", "= Enum(\"D3D11_PRIMITIVE_TOPOLOGY\", [ \"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED\", \"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST\", \"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP\", \"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ\",", "[(ObjPointer(ID3D11GeometryShader), \"pShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]), StdMethod(Void, \"IASetPrimitiveTopology\", [(D3D11_PRIMITIVE_TOPOLOGY,", "[ \"D3D11_RESOURCE_MISC_GENERATE_MIPS\", \"D3D11_RESOURCE_MISC_SHARED\", \"D3D11_RESOURCE_MISC_TEXTURECUBE\", \"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS\", \"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS\", \"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED\", \"D3D11_RESOURCE_MISC_RESOURCE_CLAMP\", 
\"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX\", \"D3D11_RESOURCE_MISC_GDI_COMPATIBLE\",", "\"D3D11_CREATE_DEVICE_SWITCH_TO_REF\", \"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS\", \"D3D11_CREATE_DEVICE_BGRA_SUPPORT\", ]) ID3D11Device.methods += [ StdMethod(HRESULT, \"CreateBuffer\", [(Pointer(Const(D3D11_BUFFER_DESC)),", "(Array(ObjPointer(ID3D11Buffer), \"NumBuffers\"), \"ppConstantBuffers\")]), StdMethod(Void, \"IAGetInputLayout\", [Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(Void, \"IAGetVertexBuffers\", [(UINT,", "obtaining a copy # of this software and associated documentation", "\"guid\"), (UINT, \"DataSize\"), (OpaqueBlob(Const(Void), \"DataSize\"), \"pData\")]), StdMethod(HRESULT, \"SetPrivateDataInterface\", [(REFGUID, \"guid\"),", "\"pDesc\")]), ] D3D11_TEXTURE1D_DESC = Struct(\"D3D11_TEXTURE1D_DESC\", [ (UINT, \"Width\"), (UINT, \"MipLevels\"),", "(FLOAT, \"MinLOD\")]), StdMethod(FLOAT, \"GetResourceMinLOD\", [(ObjPointer(ID3D11Resource), \"pResource\")]), StdMethod(Void, \"ResolveSubresource\", [(ObjPointer(ID3D11Resource), \"pDstResource\"),", "\"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH\",", "is # furnished to do so, subject to the following", "\"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3\", ]) D3D11_QUERY_MISC_FLAG = Flags(UINT, [ \"D3D11_QUERY_MISC_PREDICATEHINT\", ]) D3D11_QUERY_DESC =", "to whom the Software is # furnished to do so,", "\"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST\", \"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST\",", "StdMethod(Void, \"IAGetIndexBuffer\", [Out(Pointer(ObjPointer(ID3D11Buffer)), \"pIndexBuffer\"), Out(Pointer(DXGI_FORMAT), \"Format\"), Out(Pointer(UINT), \"Offset\")]), StdMethod(Void, \"GSGetConstantBuffers\",", "\"GetContextFlags\", []), StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]), ] D3D11_CREATE_DEVICE_FLAG", "\"pSamplerDesc\"), Out(Pointer(ObjPointer(ID3D11SamplerState)), \"ppSamplerState\")]), StdMethod(HRESULT, \"CreateQuery\", [(Pointer(Const(D3D11_QUERY_DESC)), \"pQueryDesc\"), Out(Pointer(ObjPointer(ID3D11Query)), \"ppQuery\")]), StdMethod(HRESULT,", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "None), ]) ID3D11UnorderedAccessView.methods += [ StdMethod(Void, \"GetDesc\", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), \"pDesc\")]), ]", "(UINT, \"DstSubresource\"), (Pointer(Const(D3D11_BOX)), \"pDstBox\"), (OpaquePointer(Const(Void)), \"pSrcData\"), (UINT, \"SrcRowPitch\"), (UINT, \"SrcDepthPitch\")]),", "\"Feature\", [ (\"D3D11_FEATURE_THREADING\", Pointer(D3D11_FEATURE_DATA_THREADING)), (\"D3D11_FEATURE_DOUBLES\", 
Pointer(D3D11_FEATURE_DATA_DOUBLES)), (\"D3D11_FEATURE_FORMAT_SUPPORT\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)), (\"D3D11_FEATURE_FORMAT_SUPPORT2\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),", "\"ppShaderResourceViews\")]), StdMethod(Void, \"PSSetShader\", [(ObjPointer(ID3D11PixelShader), \"pPixelShader\"), (Array(Const(ObjPointer(ID3D11ClassInstance)), \"NumClassInstances\"), \"ppClassInstances\"), (UINT, \"NumClassInstances\")]),", "(UINT64, \"PSInvocations\"), (UINT64, \"HSInvocations\"), (UINT64, \"DSInvocations\"), (UINT64, \"CSInvocations\"), ]) D3D11_QUERY_DATA_SO_STATISTICS", "\"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0\", \"D3D11_QUERY_SO_STATISTICS_STREAM1\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1\", \"D3D11_QUERY_SO_STATISTICS_STREAM2\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2\", \"D3D11_QUERY_SO_STATISTICS_STREAM3\", \"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3\", ]) D3D11_QUERY_MISC_FLAG =", "[Out(Pointer(D3D11_TEXTURE1D_DESC), \"pDesc\")]), ] D3D11_TEXTURE2D_DESC = Struct(\"D3D11_TEXTURE2D_DESC\", [ (UINT, \"Width\"), (UINT,", "StdMethod(Void, \"DrawInstanced\", [(UINT, \"VertexCountPerInstance\"), (UINT, \"InstanceCount\"), (UINT, \"StartVertexLocation\"), (UINT, \"StartInstanceLocation\")]),", "Out(OpaqueBlob(Void, \"DataSize\"), \"pData\"), (UINT, \"DataSize\"), (D3D11_ASYNC_GETDATA_FLAG, \"GetDataFlags\")]), StdMethod(Void, \"SetPredication\", [(ObjPointer(ID3D11Predicate),", "Struct(\"D3D11_TEX2D_ARRAY_DSV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX2DMS_DSV", "D3D11_TEX3D_UAV = Struct(\"D3D11_TEX3D_UAV\", [ (UINT, \"MipSlice\"), (UINT, \"FirstWSlice\"), (UINT, \"WSize\"),", "The above copyright notice and this permission notice shall be", "]) D3D11_TEX2DMS_ARRAY_RTV = Struct(\"D3D11_TEX2DMS_ARRAY_RTV\", [ (UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ])", "\"PredicateValue\")]), StdMethod(Void, \"GSSetShaderResources\", [(UINT, \"StartSlot\"), (UINT, \"NumViews\"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), \"NumViews\"), \"ppShaderResourceViews\")]),", "\"pInputElementDescs\"), (UINT, \"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"), (SIZE_T, \"BytecodeLength\"), Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]),", "[ StdMethod(Void, \"VSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(Const(ObjPointer(ID3D11Buffer)), \"NumBuffers\"), \"ppConstantBuffers\")]),", "DWORD, DWORD], internal=True), ]) d3d11.addInterfaces([ IDXGIAdapter1, IDXGIDevice1, IDXGIResource, ID3D11Debug, ID3D11InfoQueue,", "\"Disjoint\"), ]) D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct(\"D3D11_QUERY_DATA_PIPELINE_STATISTICS\", [ (UINT64, \"IAVertices\"), (UINT64, \"IAPrimitives\"),", "D3D11_DEVICE_CONTEXT_TYPE = Enum(\"D3D11_DEVICE_CONTEXT_TYPE\", [ \"D3D11_DEVICE_CONTEXT_IMMEDIATE\", \"D3D11_DEVICE_CONTEXT_DEFERRED\", ]) D3D11_CLASS_INSTANCE_DESC = Struct(\"D3D11_CLASS_INSTANCE_DESC\",", "4), \"BlendFactor\"), (UINT, \"SampleMask\")]), StdMethod(Void, \"OMSetDepthStencilState\", [(ObjPointer(ID3D11DepthStencilState), \"pDepthStencilState\"), (UINT, \"StencilRef\")]),", "\"NumBuffers\"), \"ppSOTargets\")]), StdMethod(Void, \"RSGetState\", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), \"ppRasterizerState\")]), StdMethod(Void, \"RSGetViewports\", [Out(Pointer(UINT), \"pNumViewports\"),", "\"CSSetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(Const(ObjPointer(ID3D11SamplerState)), \"NumSamplers\"), 
\"ppSamplers\")]), StdMethod(Void, \"CSSetConstantBuffers\",", "[Out(Pointer(ObjPointer(ID3D11VertexShader)), \"ppVertexShader\"), Out(Array(ObjPointer(ID3D11ClassInstance), \"*pNumClassInstances\"), \"ppClassInstances\"), Out(Pointer(UINT), \"pNumClassInstances\")]), StdMethod(Void, \"PSGetConstantBuffers\", [(UINT,", "(UINT, \"FirstArraySlice\"), (UINT, \"ArraySize\"), ]) D3D11_TEX3D_UAV = Struct(\"D3D11_TEX3D_UAV\", [ (UINT,", "[ (UINT, \"InstanceId\"), (UINT, \"InstanceIndex\"), (UINT, \"TypeId\"), (UINT, \"ConstantBuffer\"), (UINT,", "\"StartVertexLocation\")]), StdMethod(HRESULT, \"Map\", [(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\"), (D3D11_MAP, \"MapType\"), (D3D11_MAP_FLAG,", "[(ObjPointer(ID3D11Resource), \"pResource\"), (UINT, \"Subresource\")]), StdMethod(Void, \"PSSetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"),", "\"DstSubresource\"), (ObjPointer(ID3D11Resource), \"pSrcResource\"), (UINT, \"SrcSubresource\"), (DXGI_FORMAT, \"Format\")]), StdMethod(Void, \"ExecuteCommandList\", [(ObjPointer(ID3D11CommandList),", "\"ppDepthStencilView\"), (UINT, \"UAVStartSlot\"), (UINT, \"NumUAVs\"), (Array(ObjPointer(ID3D11UnorderedAccessView), \"NumUAVs\"), \"ppUnorderedAccessViews\")]), StdMethod(Void, \"OMGetBlendState\",", "\"pDesc\")]), ] D3D11_TEXTURE3D_DESC = Struct(\"D3D11_TEXTURE3D_DESC\", [ (UINT, \"Width\"), (UINT, \"Height\"),", "\"pNumClassInstances\")]), StdMethod(Void, \"HSGetSamplers\", [(UINT, \"StartSlot\"), (UINT, \"NumSamplers\"), (Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]),", "(BOOL, \"BlendEnable\"), (D3D11_BLEND, \"SrcBlend\"), (D3D11_BLEND, \"DestBlend\"), (D3D11_BLEND_OP, \"BlendOp\"), (D3D11_BLEND, \"SrcBlendAlpha\"),", "(Array(ObjPointer(ID3D11SamplerState), \"NumSamplers\"), \"ppSamplers\")]), StdMethod(Void, \"CSGetConstantBuffers\", [(UINT, \"StartSlot\"), (UINT, \"NumBuffers\"), (Array(ObjPointer(ID3D11Buffer),", "\"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH\", \"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH\",", "(\"D3D11_FEATURE_FORMAT_SUPPORT2\", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)), (\"D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS\", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)), ], Blob(Void, \"FeatureSupportDataSize\"), False) ID3D11DeviceContext.methods +=", "\"pBufferForArgs\"), (UINT, \"AlignedByteOffsetForArgs\")]), StdMethod(Void, \"Dispatch\", [(UINT, \"ThreadGroupCountX\"), (UINT, \"ThreadGroupCountY\"), (UINT,", "= Struct(\"D3D11_TEXCUBE_ARRAY_SRV\", [ (UINT, \"MostDetailedMip\"), (UINT, \"MipLevels\"), (UINT, \"First2DArrayFace\"), (UINT,", "[ (UINT, \"FirstElement\"), (UINT, \"NumElements\"), (D3D11_BUFFEREX_SRV_FLAG, \"Flags\"), ]) D3D11_TEX1D_SRV =", "[]), StdMethod(UINT, \"GetContextFlags\", []), StdMethod(HRESULT, \"FinishCommandList\", [(BOOL, \"RestoreDeferredContextState\"), Out(Pointer(ObjPointer(ID3D11CommandList)), \"ppCommandList\")]),", "\"NumElements\"), (Blob(Const(Void), \"BytecodeLength\"), \"pShaderBytecodeWithInputSignature\"), (SIZE_T, \"BytecodeLength\"), Out(Pointer(ObjPointer(ID3D11InputLayout)), \"ppInputLayout\")]), StdMethod(HRESULT, \"CreateVertexShader\"," ]
[ "op == 'nop': index += 1 if op == 'acc':", "result(input_): # Part 1 part_one = boot(input_)[1] # Part 2", "all_seqs.append(seq) if value[:3] == 'jmp': seq = deepcopy(list_) seq[idx] =", "== 'nop': seq = deepcopy(list_) seq[idx] = 'jmp' + value[3:]", "from copy import deepcopy def boot(seq): index = 0 played_indices", "in all_sequences: result = boot(sequence) if result[0] is not False:", "value index += 1 if op == 'jmp': index +=", "len(seq): return True, acc if index in played_indices: return False,", "+= value index += 1 if op == 'jmp': index", "return True, acc if index in played_indices: return False, acc", "= line[0] value = int(line[1]) if op == 'nop': index", "value = int(line[1]) if op == 'nop': index += 1", "= set() acc = 0 while True: if index ==", "seq[idx] = 'jmp' + value[3:] all_seqs.append(seq) if value[:3] == 'jmp':", "def result(input_): # Part 1 part_one = boot(input_)[1] # Part", "'jmp': index += value def generate_sequences(list_): all_seqs = [] for", "set() acc = 0 while True: if index == len(seq):", "import deepcopy def boot(seq): index = 0 played_indices = set()", "played_indices.add(index) line = seq[index].split() op = line[0] value = int(line[1])", "seq[idx] = 'nop' + value[3:] all_seqs.append(seq) return all_seqs def result(input_):", "'nop': seq = deepcopy(list_) seq[idx] = 'jmp' + value[3:] all_seqs.append(seq)", "== 'nop': index += 1 if op == 'acc': acc", "result[0] is not False: part_two = result[1] break return part_one,", "= deepcopy(list_) seq[idx] = 'jmp' + value[3:] all_seqs.append(seq) if value[:3]", "sequence in all_sequences: result = boot(sequence) if result[0] is not", "seq = deepcopy(list_) seq[idx] = 'jmp' + value[3:] all_seqs.append(seq) if", "line[0] value = int(line[1]) if op == 'nop': index +=", "== 'acc': acc += value index += 1 if op", "in played_indices: return False, acc played_indices.add(index) line = seq[index].split() op", "op == 'jmp': index += value def generate_sequences(list_): all_seqs =", "[] for idx, value in enumerate(list_): if value[:3] == 'nop':", "seq = deepcopy(list_) seq[idx] = 'nop' + value[3:] all_seqs.append(seq) return", "+= 1 if op == 'acc': acc += value index", "if value[:3] == 'nop': seq = deepcopy(list_) seq[idx] = 'jmp'", "generate_sequences(input_) for sequence in all_sequences: result = boot(sequence) if result[0]", "all_seqs = [] for idx, value in enumerate(list_): if value[:3]", "= 0 played_indices = set() acc = 0 while True:", "<filename>day08.py from copy import deepcopy def boot(seq): index = 0", "= boot(input_)[1] # Part 2 all_sequences = generate_sequences(input_) for sequence", "return False, acc played_indices.add(index) line = seq[index].split() op = line[0]", "== 'jmp': seq = deepcopy(list_) seq[idx] = 'nop' + value[3:]", "+ value[3:] all_seqs.append(seq) if value[:3] == 'jmp': seq = deepcopy(list_)", "index = 0 played_indices = set() acc = 0 while", "# Part 1 part_one = boot(input_)[1] # Part 2 all_sequences", "value in enumerate(list_): if value[:3] == 'nop': seq = deepcopy(list_)", "value[:3] == 'nop': seq = deepcopy(list_) seq[idx] = 'jmp' +", "+ value[3:] all_seqs.append(seq) return all_seqs def result(input_): # Part 1", "deepcopy(list_) seq[idx] = 'nop' + value[3:] all_seqs.append(seq) return all_seqs def", "is not False: part_two = result[1] break return part_one, part_two", "acc played_indices.add(index) line = seq[index].split() op = line[0] value =", "Part 2 all_sequences = generate_sequences(input_) for sequence in all_sequences: result", "acc if index in played_indices: return False, 
acc played_indices.add(index) line", "index += value def generate_sequences(list_): all_seqs = [] for idx,", "'nop' + value[3:] all_seqs.append(seq) return all_seqs def result(input_): # Part", "def boot(seq): index = 0 played_indices = set() acc =", "= generate_sequences(input_) for sequence in all_sequences: result = boot(sequence) if", "for idx, value in enumerate(list_): if value[:3] == 'nop': seq", "if op == 'acc': acc += value index += 1", "value[:3] == 'jmp': seq = deepcopy(list_) seq[idx] = 'nop' +", "0 while True: if index == len(seq): return True, acc", "all_sequences = generate_sequences(input_) for sequence in all_sequences: result = boot(sequence)", "line = seq[index].split() op = line[0] value = int(line[1]) if", "= [] for idx, value in enumerate(list_): if value[:3] ==", "= 0 while True: if index == len(seq): return True,", "True, acc if index in played_indices: return False, acc played_indices.add(index)", "for sequence in all_sequences: result = boot(sequence) if result[0] is", "if op == 'jmp': index += value def generate_sequences(list_): all_seqs", "op = line[0] value = int(line[1]) if op == 'nop':", "if value[:3] == 'jmp': seq = deepcopy(list_) seq[idx] = 'nop'", "enumerate(list_): if value[:3] == 'nop': seq = deepcopy(list_) seq[idx] =", "= int(line[1]) if op == 'nop': index += 1 if", "def generate_sequences(list_): all_seqs = [] for idx, value in enumerate(list_):", "op == 'acc': acc += value index += 1 if", "= seq[index].split() op = line[0] value = int(line[1]) if op", "index += 1 if op == 'acc': acc += value", "if result[0] is not False: part_two = result[1] break return", "'jmp' + value[3:] all_seqs.append(seq) if value[:3] == 'jmp': seq =", "= 'nop' + value[3:] all_seqs.append(seq) return all_seqs def result(input_): #", "copy import deepcopy def boot(seq): index = 0 played_indices =", "boot(seq): index = 0 played_indices = set() acc = 0", "index += 1 if op == 'jmp': index += value", "'jmp': seq = deepcopy(list_) seq[idx] = 'nop' + value[3:] all_seqs.append(seq)", "acc = 0 while True: if index == len(seq): return", "+= value def generate_sequences(list_): all_seqs = [] for idx, value", "all_seqs def result(input_): # Part 1 part_one = boot(input_)[1] #", "value def generate_sequences(list_): all_seqs = [] for idx, value in", "idx, value in enumerate(list_): if value[:3] == 'nop': seq =", "if index in played_indices: return False, acc played_indices.add(index) line =", "played_indices: return False, acc played_indices.add(index) line = seq[index].split() op =", "return all_seqs def result(input_): # Part 1 part_one = boot(input_)[1]", "while True: if index == len(seq): return True, acc if", "# Part 2 all_sequences = generate_sequences(input_) for sequence in all_sequences:", "int(line[1]) if op == 'nop': index += 1 if op", "0 played_indices = set() acc = 0 while True: if", "generate_sequences(list_): all_seqs = [] for idx, value in enumerate(list_): if", "boot(sequence) if result[0] is not False: part_two = result[1] break", "+= 1 if op == 'jmp': index += value def", "played_indices = set() acc = 0 while True: if index", "if op == 'nop': index += 1 if op ==", "acc += value index += 1 if op == 'jmp':", "if index == len(seq): return True, acc if index in", "deepcopy(list_) seq[idx] = 'jmp' + value[3:] all_seqs.append(seq) if value[:3] ==", "deepcopy def boot(seq): index = 0 played_indices = set() acc", "== len(seq): return True, acc if index in played_indices: return", "'nop': index += 1 if op == 'acc': acc +=", "in enumerate(list_): if value[:3] == 'nop': seq = 
deepcopy(list_) seq[idx]", "value[3:] all_seqs.append(seq) return all_seqs def result(input_): # Part 1 part_one", "boot(input_)[1] # Part 2 all_sequences = generate_sequences(input_) for sequence in", "all_seqs.append(seq) return all_seqs def result(input_): # Part 1 part_one =", "2 all_sequences = generate_sequences(input_) for sequence in all_sequences: result =", "Part 1 part_one = boot(input_)[1] # Part 2 all_sequences =", "all_sequences: result = boot(sequence) if result[0] is not False: part_two", "result = boot(sequence) if result[0] is not False: part_two =", "== 'jmp': index += value def generate_sequences(list_): all_seqs = []", "1 if op == 'acc': acc += value index +=", "= boot(sequence) if result[0] is not False: part_two = result[1]", "= 'jmp' + value[3:] all_seqs.append(seq) if value[:3] == 'jmp': seq", "False, acc played_indices.add(index) line = seq[index].split() op = line[0] value", "= deepcopy(list_) seq[idx] = 'nop' + value[3:] all_seqs.append(seq) return all_seqs", "seq[index].split() op = line[0] value = int(line[1]) if op ==", "1 part_one = boot(input_)[1] # Part 2 all_sequences = generate_sequences(input_)", "1 if op == 'jmp': index += value def generate_sequences(list_):", "value[3:] all_seqs.append(seq) if value[:3] == 'jmp': seq = deepcopy(list_) seq[idx]", "True: if index == len(seq): return True, acc if index", "index in played_indices: return False, acc played_indices.add(index) line = seq[index].split()", "part_one = boot(input_)[1] # Part 2 all_sequences = generate_sequences(input_) for", "index == len(seq): return True, acc if index in played_indices:", "'acc': acc += value index += 1 if op ==" ]
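As a sanity check on the reassembled module, the worked example from the puzzle statement can be passed straight to result(). The nine-instruction sample below comes from the puzzle text, not from the recovered file; on it, part one (the accumulator just before any instruction repeats) is 5 and part two (the accumulator after the single nop/jmp swap that lets the program terminate) is 8:

# Sample program from the puzzle statement (not part of the recovered file).
sample = ["nop +0", "acc +1", "jmp +4", "acc +3", "jmp -3",
          "acc -99", "acc +1", "jmp -4", "acc +6"]
print(result(sample))  # -> (5, 8)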
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 16:34:21 2018

@author: LiHongWang
"""
import os
import tensorflow as tf

from model import fcn_vgg
from model import fcn_mobile
from model import fcn_resnet_v2
from data import input_data

slim = tf.contrib.slim


def main():
    num_classes = 2
    tfRecorf_dir = 'D:/dataSet/kitti/road/sub_um_lane_tra66.tfrecord'
    train_dir = './fm2/'
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)

    with tf.Graph().as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        tf.logging.set_verbosity(tf.logging.INFO)

        with tf.device("/cpu:0"):
            samples = input_data.get_images_labels(tfRecorf_dir, num_classes, 66,
                                                   crop_size=[224, 224],
                                                   batch_size=4)
            batch_queue = slim.prefetch_queue.prefetch_queue(samples, capacity=128)
        tra_batch = batch_queue.dequeue()

        logit, prediction = fcn_mobile.fcn_mobv1(tra_batch['image'], num_classes)
        # logit, prediction = fcn_vgg.fcn_vgg16(tra_batch['image'], num_classes)
        # logit, prediction = fcn_resnet_v2.fcn_res101(tra_batch['image'], num_classes)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logit,
            labels=tf.squeeze(tra_batch['label'], squeeze_dims=[3]),
            name="entropy")
        loss = tf.reduce_mean(cross_entropy, name='loss')
        slim.losses.add_loss(loss)
        total_loss = slim.losses.get_total_loss()

        # print("image", tra_batch['image'])
        # print("label", tf.cast(tra_batch['label'] * 255, tf.uint8))
        # print("prediction", tf.cast(prediction * 255, tf.uint8))

        # Create some summaries to visualize the training process:
        tf.summary.scalar('losses/Total_Loss', total_loss)
        tf.summary.image("image", tra_batch['image'], max_outputs=4)
        tf.summary.image("label", tf.cast(tra_batch['label'] * 255, tf.uint8),
                         max_outputs=4)
        tf.summary.image("prediction", tf.cast(prediction * 255, tf.uint8),
                         max_outputs=4)

        lr = tf.train.exponential_decay(0.001, global_step, 10000, 0.8,
                                        staircase=True)
        # lr = tf.constant(0.001, tf.float32)
        tf.summary.scalar('learning_rate', lr)

        for variable in slim.get_model_variables():
            tf.summary.histogram(variable.op.name, variable)

        # Specify the optimizer and create the train op:
        optimizer = tf.train.RMSPropOptimizer(lr, 0.9)
        train_op = slim.learning.create_train_op(total_loss, optimizer)

        # Run the training:
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
        config = tf.ConfigProto(gpu_options=gpu_options)
        final_loss = slim.learning.train(train_op,
                                         logdir=train_dir,
                                         log_every_n_steps=100,
                                         save_summaries_secs=20,
                                         save_interval_secs=1800,
                                         init_fn=None,  # fcn_mobile.get_init_fn(),
                                         session_config=config,
                                         number_of_steps=65000)
        print('Finished training. Last batch loss %f' % final_loss)


if __name__ == '__main__':
    main()
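# For reference, the staircase learning-rate schedule used above has the
# closed form sketched below; it mirrors the 0.001 base rate, 10000-step
# interval and 0.8 decay factor passed to tf.train.exponential_decay.
def staircase_lr(step, base=0.001, decay_steps=10000, decay_rate=0.8):
    # The rate drops by a factor of decay_rate once every decay_steps steps.
    return base * decay_rate ** (step // decay_steps)

assert staircase_lr(0) == 0.001
assert abs(staircase_lr(25000) - 0.001 * 0.8 ** 2) < 1e-12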
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import codecs

from setuptools import find_packages, setup

setup(
    name='filetype',
    version='1.0.7',
    description='Infer file type and MIME type of any file/buffer. '
                'No external dependencies.',
    long_description=codecs.open('README.rst', 'r',
                                 encoding='utf-8', errors='ignore').read(),
    keywords='file libmagic magic infer numbers magicnumbers discovery mime '
             'type kind',
    url='https://github.com/h2non/filetype.py',
    download_url='https://github.com/h2non/filetype.py/tarball/master',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    license_files=['LICENSE'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: System',
        'Topic :: System :: Filesystems',
        'Topic :: Utilities'],
    platforms=['any'],
    packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
                                    'examples']),
    package_data={'filetype': ['LICENSE', '*.md']},
    zip_safe=True)
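# Once installed, the packaged library is typically used as below;
# 'sample.jpg' is a placeholder path, not a file shipped with the project.
import filetype

kind = filetype.guess('sample.jpg')   # returns None if the type is unknown
if kind is not None:
    print(kind.extension, kind.mime)  # e.g. 'jpg', 'image/jpeg'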
# make sure templates are present and netmiko knows about them
# git clone https://github.com/networktocode/ntc-templates
# export NET_TEXTFSM=/home/ntc/ntc-templates/templates/
# see https://github.com/networktocode/ntc-templates/tree/master/templates
# for list of templates

from netmiko import ConnectHandler
import json

user = 'ntc'
pwd = '<PASSWORD>'
d_type = 'cisco_ios'

csr1 = ConnectHandler(ip='csr1', username=user, password=pwd, device_type=d_type)

sh_ip_int_br = csr1.send_command("show ip int brief", use_textfsm=True)
# [{'status': 'up', 'intf': 'GigabitEthernet1', 'ipaddr': '10.0.0.51', 'proto': 'up'},
#  {'status': 'up', 'intf': 'GigabitEthernet2', 'ipaddr': 'unassigned', 'proto': 'up'},
#  {'status': 'up', 'intf': 'GigabitEthernet3', 'ipaddr': 'unassigned', 'proto': 'up'},
#  {'status': 'up', 'intf': 'GigabitEthernet4', 'ipaddr': '172.16.17.32', 'proto': 'up'},
#  {'status': 'up', 'intf': 'Loopback100', 'ipaddr': '10.200.1.20', 'proto': 'up'}]

# is type list
print (type(sh_ip_int_br))
# list of dicts
print (type(sh_ip_int_br[0]))

for each_dict in sh_ip_int_br:
    print "\n"
    for key in each_dict.keys():
        print key

for each_dict in sh_ip_int_br:
    print "\n"
    for key, value in each_dict.items():
        print key + " is " + value

sh_ver_ios = csr1.send_command("show version", use_textfsm=True)
# [{'running_image': 'packages.conf', 'hostname': 'csr1', 'uptime': '6 hours, 59 minutes',
#   'config_register': '0x2102', 'hardware': ['CSR1000V'], 'version': '16.6.2',
#   'serial': ['9KIBQAQ3OPE'], 'rommon': 'IOS-XE'}]

# print the json nicely
print (json.dumps(sh_ver_ios, indent=4))
print sh_ver_ios

# list
print type(sh_ver_ios)
# each item is a dict
print type(sh_ver_ios[0])

# list of dicts with some nested lists with the dicts
for each_dict in sh_ver_ios:
    print "\n"
    for key, value in each_dict.items():
        if type(value) is list:
            print key + " is "
            for list_entry in value:
                print list_entry
        if type(value) is str:
            print key + " is " + value
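# Because use_textfsm=True returns structured records rather than raw CLI
# text, the output is easy to index. This small follow-on (same data as the
# comments above) builds an interface-to-address lookup instead of printing
# every key.
intf_to_ip = {row['intf']: row['ipaddr'] for row in sh_ip_int_br}
print intf_to_ip.get('Loopback100')   # '10.200.1.20'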
<gh_stars>1-10
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Sample showing how to validate the Identity-Aware Proxy (IAP) JWT.

This code should be used by applications in Google Compute Engine-based
environments (such as Google App Engine flexible environment, Google
Compute Engine, or Google Container Engine) to provide an extra layer
of assurance that a request was authorized by IAP.

For applications running in the App Engine standard environment, use
App Engine's Users API instead.
"""
# [START iap_validate_jwt]
import jwt
import requests


def validate_iap_jwt_from_app_engine(iap_jwt, cloud_project_number,
                                     cloud_project_id):
    """Validate a JWT passed to your App Engine app by Identity-Aware Proxy.

    Args:
      iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
      cloud_project_number: The project *number* for your Google Cloud project.
          This is returned by 'gcloud projects describe $PROJECT_ID', or
          in the Project Info card in Cloud Console.
      cloud_project_id: The project *ID* for your Google Cloud project.

    Returns:
      (user_id, user_email, error_str).
    """
    expected_audience = '/projects/{}/apps/{}'.format(
        cloud_project_number, cloud_project_id)
    return _validate_iap_jwt(iap_jwt, expected_audience)


def validate_iap_jwt_from_compute_engine(iap_jwt, cloud_project_number,
                                         backend_service_id):
    """Validate an IAP JWT for your (Compute|Container) Engine service.

    Args:
      iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
      cloud_project_number: The project *number* for your Google Cloud project.
          This is returned by 'gcloud projects describe $PROJECT_ID', or
          in the Project Info card in Cloud Console.
      backend_service_id: The ID of the backend service used to access the
          application. See https://cloud.google.com/iap/docs/signed-headers-howto
          for details on how to get this value.

    Returns:
      (user_id, user_email, error_str).
    """
    expected_audience = '/projects/{}/global/backendServices/{}'.format(
        cloud_project_number, backend_service_id)
    return _validate_iap_jwt(iap_jwt, expected_audience)


def _validate_iap_jwt(iap_jwt, expected_audience):
    try:
        key_id = jwt.get_unverified_header(iap_jwt).get('kid')
        if not key_id:
            return (None, None, '**ERROR: no key ID**')
        key = get_iap_key(key_id)
        decoded_jwt = jwt.decode(
            iap_jwt, key, algorithms=['ES256'], audience=expected_audience)
        return (decoded_jwt['sub'], decoded_jwt['email'], '')
    except (jwt.exceptions.InvalidTokenError,
            requests.exceptions.RequestException) as e:
        return (None, None, '**ERROR: JWT validation error {}**'.format(e))


def get_iap_key(key_id):
    """Retrieves a public key from the list published by Identity-Aware Proxy,
    re-fetching the key file if necessary.
    """
    key_cache = get_iap_key.key_cache
    key = key_cache.get(key_id)
    if not key:
        # Re-fetch the key file.
        resp = requests.get(
            'https://www.gstatic.com/iap/verify/public_key')
        if resp.status_code != 200:
            raise Exception(
                'Unable to fetch IAP keys: {} / {} / {}'.format(
                    resp.status_code, resp.headers, resp.text))
        key_cache = resp.json()
        get_iap_key.key_cache = key_cache
        key = key_cache.get(key_id)
        if not key:
            raise Exception('Key {!r} not found'.format(key_id))
    return key


# Used to cache the Identity-Aware Proxy public keys.
# This code only # refetches the file when a JWT is signed with a key
# not present in this cache.
get_iap_key.key_cache = {}
# [END iap_validate_jwt]
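# Usage sketch (an addition, not part of the sample above): wiring the
# validator into a request handler. Flask and the placeholder project
# number/ID are assumptions; only the X-Goog-IAP-JWT-Assertion header name
# comes from the docstrings above.
if __name__ == '__main__':
    from flask import Flask, request

    app = Flask(__name__)

    @app.route('/')
    def index():
        # IAP forwards the signed assertion in this request header.
        iap_jwt = request.headers.get('X-Goog-IAP-JWT-Assertion')
        user_id, user_email, error_str = validate_iap_jwt_from_app_engine(
            iap_jwt, '1234567890', 'my-project-id')  # placeholder values
        if error_str:
            return 'JWT validation failed: {}'.format(error_str), 401
        return 'Authenticated as {} ({})'.format(user_email, user_id)

    app.run()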
<filename>examples/calc.py<gh_stars>1-10
import os,sys; sys.path.insert(0,os.path.dirname(os.path.dirname(__file__)))

from htag import Tag

"""
This example shows you how to make a "Calc App" (with physical buttons + keyboard events).

There is no work for rendering the layout ;-)
Can't be simpler!
"""

class Calc(Tag.div):
    statics=[Tag.H.style("""
        .mycalc *,button {font-size:2em;font-family: monospace}
    """)]

    def init(self):
        self.txt=""
        self.aff = Tag.Div("&nbsp;",_style="border:1px solid black")

        self["class"]="mycalc"

        self <= self.aff
        self <= Tag.button("C", _onclick=self.bind( self.clean) )
        self <= [Tag.button(i, _onclick=self.bind( self.press, i) ) for i in "0123456789+-x/."]
        self <= Tag.button("=", _onclick=self.bind( self.compute ) )

        #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/ with real keyboard
        self["onkeyup"] = self.bind( self.presskey, b"event.key" )

    def presskey(self,key):
        if key in "0123456789+-*/.":
            self.press(key)
        elif key=="Enter":
            self.compute()
        elif key in ["Delete","Backspace"]:
            self.clean()
    #-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/-/

    def press(self,val):
        self.txt += val
        self.aff.set( self.txt )

    def compute(self):
        try:
            self.txt = str(eval(self.txt.replace("x","*")))
            self.aff.set( self.txt )
        except:
            self.txt = ""
            self.aff.set( "Error" )

    def clean(self):
        self.txt=""
        self.aff.set("&nbsp;")

if __name__=="__main__":
    # import logging
    # logging.basicConfig(format='[%(levelname)-5s] %(name)s: %(message)s',level=logging.DEBUG)
    # logging.getLogger("htag.tag").setLevel( logging.INFO )

    # and execute it in a pywebview instance
    from htag.runners import *

    # here is another runner, in a simple browser (thru ajax calls)
    BrowserHTTP( Calc ).run()
    # PyWebWiew( Calc ).run()
<filename>res/example1.py
from giraphics.graphing.graph import Graph

def func(x):
    return (x-3)*(x+2)*x*0.2

g = Graph(800,600,8,6, 'example1.svg')
g.bg()
g.grid()
g.axes()
g.graph(func)
g.save()
g.display()
from tools.geofunc import GeoFunc
import pandas as pd
import json

def getData(index):
    '''Datasets that raise errors (hollow shapes): han, jakobs1, jakobs2'''
    '''Too many shapes, not handled yet: shapes, shirt, swim, trousers'''
    name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
    print("Processing dataset:", name[index])
    '''Width is not considered for now; everything is expressed via scaling'''
    # NOTE: scale has fewer entries than name, so indices beyond 12 will raise IndexError
    scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
    print("Scaling by a factor of", scale[index])
    df = pd.read_csv("data/"+name[index]+".csv")
    polygons=[]
    for i in range(0,df.shape[0]):
        for j in range(0,df['num'][i]):
            poly=json.loads(df['polygon'][i])
            GeoFunc.normData(poly,scale[index])
            polygons.append(poly)
    return polygons
[ "import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \\ # safe_lookup, len_batch from", "\\ # safe_lookup, len_batch from .export import as_image_ui8, as_rgb_image, export_image,", "# safe_lookup, len_batch from .export import as_image_ui8, as_rgb_image, export_image, export_sample,", ".export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image from .table_sqlite import", "to_value, \\ # safe_lookup, len_batch from .export import as_image_ui8, as_rgb_image,", "as_rgb_image, export_image, export_sample, export_as_image from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows", "get_table_number_of_rows from .reporting_bokeh import report, create_default_reporting_options from .reporting_bokeh_samples import PanelDataSamplesTabular", "collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \\ # safe_lookup, len_batch from .export", "import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image from .table_sqlite import TableStream,", ".table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh import report, create_default_reporting_options", "from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh import report,", "import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh import report, create_default_reporting_options from", "from .export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image from .table_sqlite", "export_image, export_sample, export_as_image from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from", "TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh import report, create_default_reporting_options from .reporting_bokeh_samples", "<reponame>civodlu/trw #from trw.utils import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \\ #", "get_batch_n, to_value, \\ # safe_lookup, len_batch from .export import as_image_ui8,", "export_sample, export_as_image from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh", "as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN,", "safe_lookup, len_batch from .export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image", "export_as_image from .table_sqlite import TableStream, SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh import", "len_batch from .export import as_image_ui8, as_rgb_image, export_image, export_sample, export_as_image from", "collect_hierarchical_parameter_name, get_batch_n, to_value, \\ # safe_lookup, len_batch from .export import", "trw.utils import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \\ # safe_lookup, len_batch", "#from trw.utils import collect_hierarchical_module_name, collect_hierarchical_parameter_name, get_batch_n, to_value, \\ # safe_lookup,", "SQLITE_TYPE_PATTERN, get_table_number_of_rows from .reporting_bokeh import report, create_default_reporting_options from .reporting_bokeh_samples import" ]
[ "from tqdm import tqdm from pathos.multiprocessing import ProcessingPool as Pool", "with Pool(opt_threads) as p: pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items), desc=desc))", "range(20): im = cv.blur(im, (35,35)) return result # end pool", "default=(None, None), help='Slice list of files') @click.option('-t', '--threads', 'opt_threads', default=None)", "DNNFactory from vframe.utils import file_utils from vframe.utils.video_utils import FileVideoStream, mediainfo", "worker # ----------------------------------------------------------- # convert file list into object with", "from vframe.image.dnn_factory import DNNFactory from vframe.utils import file_utils from vframe.utils.video_utils", "init processing pool iterator # use imap instead of map", "of map via @hkyi Stack Overflow 41920124 desc = f'image-mp", "None), help='Slice list of files') @click.option('-t', '--threads', 'opt_threads', default=None) @click.pass_context", "dataclasses import asdict import numpy as np import cv2 as", "f'image-mp x{opt_threads}' with Pool(opt_threads) as p: pool_results = list(tqdm(p.imap(pool_worker, pool_items),", "if not opt_threads: opt_threads = cpu_count() # maximum # glob", "not opt_threads: opt_threads = cpu_count() # maximum # glob items", "end pool worker # ----------------------------------------------------------- # convert file list into", "from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import cpu_count", "# add media metadata im = cv.imread(fp) for i in", "object with pool_items = [{'fp': fp} for fp in fp_items]", "default=['jpg', 'png'], multiple=True, help='Glob extension') @click.option('--slice', 'opt_slice', type=(int, int), default=(None,", "file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,}", "vframe.settings.modelzoo_cfg import modelzoo from vframe.models.dnn import DNN from vframe.image.dnn_factory import", "fp} # add media metadata im = cv.imread(fp) for i", "def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor image template\"\"\"", "= pool_item['fp'] result = {'fp': fp} # add media metadata", "pool worker # ----------------------------------------------------------- # convert file list into object", "desc = f'image-mp x{opt_threads}' with Pool(opt_threads) as p: pool_results =", "asdict import numpy as np import cv2 as cv from", "# # VFRAME # MIT License # Copyright (c) 2020", "= app_cfg.LOG # set N threads if not opt_threads: opt_threads", "import tqdm from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing", "file_utils from vframe.utils.video_utils import FileVideoStream, mediainfo log = app_cfg.LOG #", "opt_exts, recursive=opt_recursive) if any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files')", "pool_items = [{'fp': fp} for fp in fp_items] # init", "in fp_items] # init processing pool iterator # use imap", "'opt_recursive', is_flag=True) @click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True, help='Glob extension')", "from dataclasses import asdict import numpy as np import cv2", "threads if not opt_threads: opt_threads = cpu_count() # maximum #", "is_flag=True) @click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True, help='Glob extension') @click.option('--slice',", "maximum # glob items fp_items = 
file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if", "# imports from os.path import join from pathlib import Path", "import Path from dataclasses import asdict import numpy as np", "{'fp': fp} # add media metadata im = cv.imread(fp) for", "imports from os.path import join from pathlib import Path from", "'--input', 'opt_dir_in', required=True) @click.option('-r', '--recursive', 'opt_recursive', is_flag=True) @click.option('-e', '--ext', 'opt_exts',", "app_cfg.LOG # set N threads if not opt_threads: opt_threads =", "os.path import join from pathlib import Path from dataclasses import", "reader fp = pool_item['fp'] result = {'fp': fp} # add", "threaded video reader fp = pool_item['fp'] result = {'fp': fp}", "cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor image template\"\"\" #", "DNN from vframe.image.dnn_factory import DNNFactory from vframe.utils import file_utils from", "from vframe.settings import app_cfg from vframe.settings.modelzoo_cfg import modelzoo from vframe.models.dnn", "result = {'fp': fp} # add media metadata im =", "# Copyright (c) 2020 <NAME> and VFRAME # https://vframe.io #", "# ############################################################################# import click @click.command('') @click.option('-i', '--input', 'opt_dir_in', required=True) @click.option('-r',", "pathlib import Path from dataclasses import asdict import numpy as", "= cv.blur(im, (35,35)) return result # end pool worker #", "result # end pool worker # ----------------------------------------------------------- # convert file", "import app_cfg from vframe.settings.modelzoo_cfg import modelzoo from vframe.models.dnn import DNN", "import DNNFactory from vframe.utils import file_utils from vframe.utils.video_utils import FileVideoStream,", "fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]]", "any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files') # ----------------------------------------------------------- #", "'opt_slice', type=(int, int), default=(None, None), help='Slice list of files') @click.option('-t',", "list into object with pool_items = [{'fp': fp} for fp", "help='Slice list of files') @click.option('-t', '--threads', 'opt_threads', default=None) @click.pass_context def", "from pathlib import Path from dataclasses import asdict import numpy", "import numpy as np import cv2 as cv from tqdm", "app_cfg from vframe.settings.modelzoo_cfg import modelzoo from vframe.models.dnn import DNN from", "and VFRAME # https://vframe.io # ############################################################################# import click @click.command('') @click.option('-i',", "'--ext', 'opt_exts', default=['jpg', 'png'], multiple=True, help='Glob extension') @click.option('--slice', 'opt_slice', type=(int,", "# ----------------------------------------------------------- # convert file list into object with pool_items", "if any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files') # -----------------------------------------------------------", "# https://vframe.io # ############################################################################# import click @click.command('') @click.option('-i', '--input', 'opt_dir_in',", "cv.imread(fp) for i in range(20): im = cv.blur(im, (35,35)) return", "multiple=True, help='Glob extension') @click.option('--slice', 
'opt_slice', type=(int, int), default=(None, None), help='Slice", "fp} for fp in fp_items] # init processing pool iterator", "@click.option('-i', '--input', 'opt_dir_in', required=True) @click.option('-r', '--recursive', 'opt_recursive', is_flag=True) @click.option('-e', '--ext',", "import file_utils from vframe.utils.video_utils import FileVideoStream, mediainfo log = app_cfg.LOG", "required=True) @click.option('-r', '--recursive', 'opt_recursive', is_flag=True) @click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'],", "type=(int, int), default=(None, None), help='Slice list of files') @click.option('-t', '--threads',", "as cv from tqdm import tqdm from pathos.multiprocessing import ProcessingPool", "# ----------------------------------------------------------- # start pool worker def pool_worker(pool_item): # init", "processing pool iterator # use imap instead of map via", "opt_slice, opt_threads): \"\"\"Multiprocessor image template\"\"\" # ------------------------------------------------ # imports from", "opt_threads = cpu_count() # maximum # glob items fp_items =", "opt_threads: opt_threads = cpu_count() # maximum # glob items fp_items", "# maximum # glob items fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive)", "from os.path import join from pathlib import Path from dataclasses", "add media metadata im = cv.imread(fp) for i in range(20):", "into object with pool_items = [{'fp': fp} for fp in", "convert file list into object with pool_items = [{'fp': fp}", "'opt_exts', default=['jpg', 'png'], multiple=True, help='Glob extension') @click.option('--slice', 'opt_slice', type=(int, int),", "'--recursive', 'opt_recursive', is_flag=True) @click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True, help='Glob", "N threads if not opt_threads: opt_threads = cpu_count() # maximum", "import click @click.command('') @click.option('-i', '--input', 'opt_dir_in', required=True) @click.option('-r', '--recursive', 'opt_recursive',", "= {'fp': fp} # add media metadata im = cv.imread(fp)", "Stack Overflow 41920124 desc = f'image-mp x{opt_threads}' with Pool(opt_threads) as", "= fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files') # ----------------------------------------------------------- # start pool", "pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import cpu_count from", "pool worker def pool_worker(pool_item): # init threaded video reader fp", "np import cv2 as cv from tqdm import tqdm from", "fp in fp_items] # init processing pool iterator # use", "pool iterator # use imap instead of map via @hkyi", "# MIT License # Copyright (c) 2020 <NAME> and VFRAME", "im = cv.blur(im, (35,35)) return result # end pool worker", "FileVideoStream, mediainfo log = app_cfg.LOG # set N threads if", "import ProcessingPool as Pool from pathos.multiprocessing import cpu_count from vframe.settings", "click @click.command('') @click.option('-i', '--input', 'opt_dir_in', required=True) @click.option('-r', '--recursive', 'opt_recursive', is_flag=True)", "41920124 desc = f'image-mp x{opt_threads}' with Pool(opt_threads) as p: pool_results", "# init processing pool iterator # use imap instead of", "import modelzoo from vframe.models.dnn import DNN from vframe.image.dnn_factory import DNNFactory", "vframe.models.dnn import DNN from vframe.image.dnn_factory import DNNFactory from vframe.utils import", "@click.option('-t', '--threads', 'opt_threads', default=None) @click.pass_context def cli(ctx, 
opt_dir_in, opt_recursive, opt_exts,", "cpu_count from vframe.settings import app_cfg from vframe.settings.modelzoo_cfg import modelzoo from", "map via @hkyi Stack Overflow 41920124 desc = f'image-mp x{opt_threads}'", "in range(20): im = cv.blur(im, (35,35)) return result # end", "Pool from pathos.multiprocessing import cpu_count from vframe.settings import app_cfg from", "recursive=opt_recursive) if any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files') #", "# start pool worker def pool_worker(pool_item): # init threaded video", "worker def pool_worker(pool_item): # init threaded video reader fp =", "# end pool worker # ----------------------------------------------------------- # convert file list", "# VFRAME # MIT License # Copyright (c) 2020 <NAME>", "fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files') # ----------------------------------------------------------- # start", "files') # ----------------------------------------------------------- # start pool worker def pool_worker(pool_item): #", "fp_items] # init processing pool iterator # use imap instead", "(35,35)) return result # end pool worker # ----------------------------------------------------------- #", "extension') @click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), help='Slice list of", "iterator # use imap instead of map via @hkyi Stack", "x{opt_threads}' with Pool(opt_threads) as p: pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items),", "# convert file list into object with pool_items = [{'fp':", "mediainfo log = app_cfg.LOG # set N threads if not", "opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor image template\"\"\" # ------------------------------------------------ # imports", "cv2 as cv from tqdm import tqdm from pathos.multiprocessing import", "# use imap instead of map via @hkyi Stack Overflow", "return result # end pool worker # ----------------------------------------------------------- # convert", "from vframe.models.dnn import DNN from vframe.image.dnn_factory import DNNFactory from vframe.utils", "VFRAME # https://vframe.io # ############################################################################# import click @click.command('') @click.option('-i', '--input',", "via @hkyi Stack Overflow 41920124 desc = f'image-mp x{opt_threads}' with", "'png'], multiple=True, help='Glob extension') @click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),", "@click.pass_context def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor image", "cv from tqdm import tqdm from pathos.multiprocessing import ProcessingPool as", "@click.command('') @click.option('-i', '--input', 'opt_dir_in', required=True) @click.option('-r', '--recursive', 'opt_recursive', is_flag=True) @click.option('-e',", "'--threads', 'opt_threads', default=None) @click.pass_context def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice,", "list of files') @click.option('-t', '--threads', 'opt_threads', default=None) @click.pass_context def cli(ctx,", "import DNN from vframe.image.dnn_factory import DNNFactory from vframe.utils import file_utils", "cv.blur(im, (35,35)) return result # end pool worker # -----------------------------------------------------------", "vframe.image.dnn_factory import DNNFactory from vframe.utils import file_utils from vframe.utils.video_utils import", "import cv2 as cv from tqdm import tqdm from 
pathos.multiprocessing", "# init threaded video reader fp = pool_item['fp'] result =", "numpy as np import cv2 as cv from tqdm import", "instead of map via @hkyi Stack Overflow 41920124 desc =", "pool_item['fp'] result = {'fp': fp} # add media metadata im", "@hkyi Stack Overflow 41920124 desc = f'image-mp x{opt_threads}' with Pool(opt_threads)", "# glob items fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if any(opt_slice):", "files') @click.option('-t', '--threads', 'opt_threads', default=None) @click.pass_context def cli(ctx, opt_dir_in, opt_recursive,", "Path from dataclasses import asdict import numpy as np import", "------------------------------------------------ # imports from os.path import join from pathlib import", "Copyright (c) 2020 <NAME> and VFRAME # https://vframe.io # #############################################################################", "start pool worker def pool_worker(pool_item): # init threaded video reader", "media metadata im = cv.imread(fp) for i in range(20): im", "join from pathlib import Path from dataclasses import asdict import", "from vframe.utils.video_utils import FileVideoStream, mediainfo log = app_cfg.LOG # set", "imap instead of map via @hkyi Stack Overflow 41920124 desc", "'opt_dir_in', required=True) @click.option('-r', '--recursive', 'opt_recursive', is_flag=True) @click.option('-e', '--ext', 'opt_exts', default=['jpg',", "# ------------------------------------------------ # imports from os.path import join from pathlib", "from vframe.settings.modelzoo_cfg import modelzoo from vframe.models.dnn import DNN from vframe.image.dnn_factory", "----------------------------------------------------------- # start pool worker def pool_worker(pool_item): # init threaded", "cpu_count() # maximum # glob items fp_items = file_utils.glob_multi(opt_dir_in, opt_exts,", "use imap instead of map via @hkyi Stack Overflow 41920124", "int), default=(None, None), help='Slice list of files') @click.option('-t', '--threads', 'opt_threads',", "log.info(f'Processing: {len(fp_items):,} files') # ----------------------------------------------------------- # start pool worker def", "import cpu_count from vframe.settings import app_cfg from vframe.settings.modelzoo_cfg import modelzoo", "template\"\"\" # ------------------------------------------------ # imports from os.path import join from", "as Pool from pathos.multiprocessing import cpu_count from vframe.settings import app_cfg", "help='Glob extension') @click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), help='Slice list", "pool_worker(pool_item): # init threaded video reader fp = pool_item['fp'] result", "for fp in fp_items] # init processing pool iterator #", "fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing: {len(fp_items):,} files') # ----------------------------------------------------------- # start pool worker", "glob items fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if any(opt_slice): fp_items", "= file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if any(opt_slice): fp_items = fp_items[opt_slice[0]:opt_slice[1]] log.info(f'Processing:", "as np import cv2 as cv from tqdm import tqdm", "with pool_items = [{'fp': fp} for fp in fp_items] #", "[{'fp': fp} for fp in fp_items] # init processing pool", "= f'image-mp x{opt_threads}' with Pool(opt_threads) as p: pool_results = list(tqdm(p.imap(pool_worker,", "im = cv.imread(fp) for i in range(20): im = cv.blur(im,", "= [{'fp': fp} for fp in fp_items] 
# init processing", "def pool_worker(pool_item): # init threaded video reader fp = pool_item['fp']", "metadata im = cv.imread(fp) for i in range(20): im =", "https://vframe.io # ############################################################################# import click @click.command('') @click.option('-i', '--input', 'opt_dir_in', required=True)", "fp = pool_item['fp'] result = {'fp': fp} # add media", "log = app_cfg.LOG # set N threads if not opt_threads:", "VFRAME # MIT License # Copyright (c) 2020 <NAME> and", "file list into object with pool_items = [{'fp': fp} for", "vframe.utils.video_utils import FileVideoStream, mediainfo log = app_cfg.LOG # set N", "tqdm import tqdm from pathos.multiprocessing import ProcessingPool as Pool from", "= cpu_count() # maximum # glob items fp_items = file_utils.glob_multi(opt_dir_in,", "image template\"\"\" # ------------------------------------------------ # imports from os.path import join", "of files') @click.option('-t', '--threads', 'opt_threads', default=None) @click.pass_context def cli(ctx, opt_dir_in,", "vframe.utils import file_utils from vframe.utils.video_utils import FileVideoStream, mediainfo log =", "pathos.multiprocessing import cpu_count from vframe.settings import app_cfg from vframe.settings.modelzoo_cfg import", "init threaded video reader fp = pool_item['fp'] result = {'fp':", "MIT License # Copyright (c) 2020 <NAME> and VFRAME #", "import FileVideoStream, mediainfo log = app_cfg.LOG # set N threads", "# set N threads if not opt_threads: opt_threads = cpu_count()", "import join from pathlib import Path from dataclasses import asdict", "2020 <NAME> and VFRAME # https://vframe.io # ############################################################################# import click", "set N threads if not opt_threads: opt_threads = cpu_count() #", "i in range(20): im = cv.blur(im, (35,35)) return result #", "opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor image template\"\"\" # ------------------------------------------------", "\"\"\"Multiprocessor image template\"\"\" # ------------------------------------------------ # imports from os.path import", "############################################################################# import click @click.command('') @click.option('-i', '--input', 'opt_dir_in', required=True) @click.option('-r', '--recursive',", "(c) 2020 <NAME> and VFRAME # https://vframe.io # ############################################################################# import", "modelzoo from vframe.models.dnn import DNN from vframe.image.dnn_factory import DNNFactory from", "{len(fp_items):,} files') # ----------------------------------------------------------- # start pool worker def pool_worker(pool_item):", "Overflow 41920124 desc = f'image-mp x{opt_threads}' with Pool(opt_threads) as p:", "vframe.settings import app_cfg from vframe.settings.modelzoo_cfg import modelzoo from vframe.models.dnn import", "<NAME> and VFRAME # https://vframe.io # ############################################################################# import click @click.command('')", "for i in range(20): im = cv.blur(im, (35,35)) return result", "'opt_threads', default=None) @click.pass_context def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads):", "@click.option('-r', '--recursive', 'opt_recursive', is_flag=True) @click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True,", "from pathos.multiprocessing import cpu_count from vframe.settings import app_cfg from 
vframe.settings.modelzoo_cfg", "from vframe.utils import file_utils from vframe.utils.video_utils import FileVideoStream, mediainfo log", "video reader fp = pool_item['fp'] result = {'fp': fp} #", "tqdm from pathos.multiprocessing import ProcessingPool as Pool from pathos.multiprocessing import", "opt_recursive, opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor image template\"\"\" # ------------------------------------------------ #", "= cv.imread(fp) for i in range(20): im = cv.blur(im, (35,35))", "items fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive) if any(opt_slice): fp_items =", "opt_threads): \"\"\"Multiprocessor image template\"\"\" # ------------------------------------------------ # imports from os.path", "@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True, help='Glob extension') @click.option('--slice', 'opt_slice',", "ProcessingPool as Pool from pathos.multiprocessing import cpu_count from vframe.settings import", "default=None) @click.pass_context def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads): \"\"\"Multiprocessor", "############################################################################# # # VFRAME # MIT License # Copyright (c)", "@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None), help='Slice list of files')", "----------------------------------------------------------- # convert file list into object with pool_items =", "import asdict import numpy as np import cv2 as cv", "License # Copyright (c) 2020 <NAME> and VFRAME # https://vframe.io" ]
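# A stripped-down, self-contained sketch of the same imap+tqdm pool pattern
# with a toy workload, assuming only pathos and tqdm are installed (no
# VFRAME modules needed). The worker function and item shape are illustrative.
from pathos.multiprocessing import ProcessingPool as Pool
from tqdm import tqdm

def _square(item):
  # trivial stand-in for the image-processing worker
  return item['n'] ** 2

if __name__ == '__main__':
  items = [{'n': i} for i in range(100)]
  with Pool(4) as p:
    results = list(tqdm(p.imap(_square, items), total=len(items), desc='square-mp x4'))
  print(results[:5])  # [0, 1, 4, 9, 16]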
[ "<filename>src/learndash/api_resources/user.py import learndash from learndash.api_resources.abstract import ListableAPIResource from learndash.api_resources.abstract import", "User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path = learndash.path_users def course_progress(self, id=None): return UserCourseProgress(id,", "UserCourseProgress(id, parent=self) def courses(self, id=None): return UserCourse(id, parent=self) def groups(self,", "ListableAPIResource from learndash.api_resources.abstract import RetrievableAPIResource from learndash.api_resources.abstract import UpdateableAPIResource from", "UserCourseProgressDict from learndash.api_resources.typing import UserCourseDict from learndash.api_resources.typing import UserGroupDict from", "import UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path = learndash.path_users def course_progress(self,", "api_path = learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource,", "UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable api_path = learndash.path_user_courses def", "deleteable api_path = learndash.path_user_groups def instance_url(self): # This endpoint accepts", "= learndash.path_user_courses def instance_url(self): # This endpoint accepts updates and", "id=None): return UserGroup(id, parent=self) def quiz_progress(self, id=None): return UserQuizProgress(id, parent=self)", "api_path = learndash.path_user_groups def instance_url(self): # This endpoint accepts updates", "updates and deletions at it's base endpoint return self.class_url() class", "# also deletable api_path = learndash.path_user_courses def instance_url(self): # This", "return UserGroup(id, parent=self) def quiz_progress(self, id=None): return UserQuizProgress(id, parent=self) class", "learndash.api_resources.typing import UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path = learndash.path_users def", "also deletable api_path = learndash.path_user_courses def instance_url(self): # This endpoint", "= learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource):", "NestedAPIResource): # also deleteable api_path = learndash.path_user_groups def instance_url(self): #", "UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable api_path = learndash.path_user_groups def", "def course_progress(self, id=None): return UserCourseProgress(id, parent=self) def courses(self, id=None): return", "import RetrievableAPIResource from learndash.api_resources.abstract import UpdateableAPIResource from learndash.api_resources.abstract import NestedAPIResource", "from learndash.api_resources.typing import UserCourseProgressDict from learndash.api_resources.typing import UserCourseDict from learndash.api_resources.typing", "from learndash.api_resources.abstract import RetrievableAPIResource from learndash.api_resources.abstract import UpdateableAPIResource from learndash.api_resources.abstract", "deletable api_path = learndash.path_user_courses def instance_url(self): # This endpoint accepts", "learndash.path_user_groups def instance_url(self): # This endpoint accepts updates and deletions", 
"learndash.api_resources.typing import UserDict from learndash.api_resources.typing import UserCourseProgressDict from learndash.api_resources.typing import", "return UserCourse(id, parent=self) def groups(self, id=None): return UserGroup(id, parent=self) def", "learndash from learndash.api_resources.abstract import ListableAPIResource from learndash.api_resources.abstract import RetrievableAPIResource from", "learndash.api_resources.typing import UserGroupDict from learndash.api_resources.typing import UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]):", "learndash.api_resources.abstract import NestedAPIResource from learndash.api_resources.typing import UserDict from learndash.api_resources.typing import", "learndash.api_resources.typing import UserCourseDict from learndash.api_resources.typing import UserGroupDict from learndash.api_resources.typing import", "accepts updates and deletions at it's base endpoint return self.class_url()", "parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path = learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource,", "learndash.path_users def course_progress(self, id=None): return UserCourseProgress(id, parent=self) def courses(self, id=None):", "UserCourseDict from learndash.api_resources.typing import UserGroupDict from learndash.api_resources.typing import UserQuizProgressDict class", "import UserCourseProgressDict from learndash.api_resources.typing import UserCourseDict from learndash.api_resources.typing import UserGroupDict", "parent=self) def groups(self, id=None): return UserGroup(id, parent=self) def quiz_progress(self, id=None):", "id=None): return UserCourseProgress(id, parent=self) def courses(self, id=None): return UserCourse(id, parent=self)", "NestedAPIResource from learndash.api_resources.typing import UserDict from learndash.api_resources.typing import UserCourseProgressDict from", "class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable api_path = learndash.path_user_courses", "base endpoint return self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also", "= learndash.path_users def course_progress(self, id=None): return UserCourseProgress(id, parent=self) def courses(self,", "UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path = learndash.path_user_course_progress # class", "UpdateableAPIResource, NestedAPIResource): # also deleteable api_path = learndash.path_user_groups def instance_url(self):", "import ListableAPIResource from learndash.api_resources.abstract import RetrievableAPIResource from learndash.api_resources.abstract import UpdateableAPIResource", "courses(self, id=None): return UserCourse(id, parent=self) def groups(self, id=None): return UserGroup(id,", "from learndash.api_resources.typing import UserCourseDict from learndash.api_resources.typing import UserGroupDict from learndash.api_resources.typing", "learndash.api_resources.abstract import ListableAPIResource from learndash.api_resources.abstract import RetrievableAPIResource from learndash.api_resources.abstract import", "parent=self) def courses(self, id=None): return UserCourse(id, parent=self) def groups(self, id=None):", "def groups(self, id=None): return UserGroup(id, parent=self) def quiz_progress(self, id=None): return", 
"UserGroupDict from learndash.api_resources.typing import UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path =", "id=None): return UserCourse(id, parent=self) def groups(self, id=None): return UserGroup(id, parent=self)", "class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable api_path = learndash.path_user_groups", "learndash.api_resources.abstract import UpdateableAPIResource from learndash.api_resources.abstract import NestedAPIResource from learndash.api_resources.typing import", "from learndash.api_resources.abstract import UpdateableAPIResource from learndash.api_resources.abstract import NestedAPIResource from learndash.api_resources.typing", "from learndash.api_resources.typing import UserDict from learndash.api_resources.typing import UserCourseProgressDict from learndash.api_resources.typing", "also deleteable api_path = learndash.path_user_groups def instance_url(self): # This endpoint", "RetrievableAPIResource from learndash.api_resources.abstract import UpdateableAPIResource from learndash.api_resources.abstract import NestedAPIResource from", "it's base endpoint return self.class_url() class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource): api_path =", "This endpoint accepts updates and deletions at it's base endpoint", "parent=self) def quiz_progress(self, id=None): return UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource):", "deletions at it's base endpoint return self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource,", "def instance_url(self): # This endpoint accepts updates and deletions at", "api_path = learndash.path_user_courses def instance_url(self): # This endpoint accepts updates", "return UserCourseProgress(id, parent=self) def courses(self, id=None): return UserCourse(id, parent=self) def", "class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable", "UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path = learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class", "from learndash.api_resources.abstract import NestedAPIResource from learndash.api_resources.typing import UserDict from learndash.api_resources.typing", "groups(self, id=None): return UserGroup(id, parent=self) def quiz_progress(self, id=None): return UserQuizProgress(id,", "NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable api_path =", "def quiz_progress(self, id=None): return UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path", "and deletions at it's base endpoint return self.class_url() class UserQuizProgress(ListableAPIResource[UserQuizProgressDict],", "at it's base endpoint return self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource):", "class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path = learndash.path_users def course_progress(self, id=None): return", "# also deleteable api_path = learndash.path_user_groups def instance_url(self): # This", "it's base endpoint return 
self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): #", "NestedAPIResource): api_path = learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict],", "return UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path = learndash.path_user_course_progress #", "UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path = learndash.path_users def course_progress(self, id=None):", "class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path = learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource):", "import UserCourseDict from learndash.api_resources.typing import UserGroupDict from learndash.api_resources.typing import UserQuizProgressDict", "# This endpoint accepts updates and deletions at it's base", "import UpdateableAPIResource from learndash.api_resources.abstract import NestedAPIResource from learndash.api_resources.typing import UserDict", "from learndash.api_resources.typing import UserGroupDict from learndash.api_resources.typing import UserQuizProgressDict class User(RetrievableAPIResource[UserDict],", "instance_url(self): # This endpoint accepts updates and deletions at it's", "quiz_progress(self, id=None): return UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path =", "import NestedAPIResource from learndash.api_resources.typing import UserDict from learndash.api_resources.typing import UserCourseProgressDict", "= learndash.path_user_groups def instance_url(self): # This endpoint accepts updates and", "NestedAPIResource): # also deletable api_path = learndash.path_user_courses def instance_url(self): #", "learndash.api_resources.abstract import RetrievableAPIResource from learndash.api_resources.abstract import UpdateableAPIResource from learndash.api_resources.abstract import", "base endpoint return self.class_url() class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource): api_path = learndash.path_user_quiz_progress", "UpdateableAPIResource, NestedAPIResource): # also deletable api_path = learndash.path_user_courses def instance_url(self):", "and deletions at it's base endpoint return self.class_url() class UserGroup(ListableAPIResource[UserGroupDict],", "def courses(self, id=None): return UserCourse(id, parent=self) def groups(self, id=None): return", "course_progress(self, id=None): return UserCourseProgress(id, parent=self) def courses(self, id=None): return UserCourse(id,", "return self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable api_path", "endpoint return self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable", "deletions at it's base endpoint return self.class_url() class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource):", "UserCourse(id, parent=self) def groups(self, id=None): return UserGroup(id, parent=self) def quiz_progress(self,", "from learndash.api_resources.abstract import ListableAPIResource from learndash.api_resources.abstract import RetrievableAPIResource from learndash.api_resources.abstract", 
"learndash.api_resources.typing import UserCourseProgressDict from learndash.api_resources.typing import UserCourseDict from learndash.api_resources.typing import", "UserDict from learndash.api_resources.typing import UserCourseProgressDict from learndash.api_resources.typing import UserCourseDict from", "from learndash.api_resources.typing import UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path = learndash.path_users", "id=None): return UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict], NestedAPIResource): api_path = learndash.path_user_course_progress", "at it's base endpoint return self.class_url() class UserQuizProgress(ListableAPIResource[UserQuizProgressDict], NestedAPIResource): api_path", "UpdateableAPIResource from learndash.api_resources.abstract import NestedAPIResource from learndash.api_resources.typing import UserDict from", "import UserDict from learndash.api_resources.typing import UserCourseProgressDict from learndash.api_resources.typing import UserCourseDict", "UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also deletable api_path", "UserGroup(id, parent=self) def quiz_progress(self, id=None): return UserQuizProgress(id, parent=self) class UserCourseProgress(ListableAPIResource[UserCourseProgressDict],", "import learndash from learndash.api_resources.abstract import ListableAPIResource from learndash.api_resources.abstract import RetrievableAPIResource", "api_path = learndash.path_users def course_progress(self, id=None): return UserCourseProgress(id, parent=self) def", "learndash.path_user_courses def instance_url(self): # This endpoint accepts updates and deletions", "endpoint accepts updates and deletions at it's base endpoint return", "import UserGroupDict from learndash.api_resources.typing import UserQuizProgressDict class User(RetrievableAPIResource[UserDict], ListableAPIResource[UserDict]): api_path", "# class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): # also", "ListableAPIResource[UserDict]): api_path = learndash.path_users def course_progress(self, id=None): return UserCourseProgress(id, parent=self)", "self.class_url() class UserGroup(ListableAPIResource[UserGroupDict], UpdateableAPIResource, NestedAPIResource): # also deleteable api_path =", "learndash.path_user_course_progress # class UserCourseProgressSteps(ListableAPIResource, NestedAPIResource): class UserCourse(ListableAPIResource[UserCourseDict], UpdateableAPIResource, NestedAPIResource): #" ]
[ "subclasses.\"\"\" self.app_info = app_info self.resolver_kwds = kwds def _get_config_option(self, key,", "getattr(self.app_info, key) else: return default @abstractmethod def resolve(self, enabled_container_types, tool_info,", "argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of the tool and its", "Keys for dictification. dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies = False", "images for tool execution.\"\"\" from abc import ( ABCMeta, abstractmethod,", "\"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\" self.app_info = app_info self.resolver_kwds =", "matching all supplied requirements for tool. The supplied argument is", "**kwds): \"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\" self.app_info = app_info self.resolver_kwds", "def _get_config_option(self, key, default=None): \"\"\"Look in resolver-specific settings for option", "for the type of container resolution.\"\"\" def _container_type_enabled(self, container_description, enabled_container_types):", "key): return getattr(self.app_info, key) else: return default @abstractmethod def resolve(self,", "settings. \"\"\" if self.app_info and hasattr(self.app_info, key): return getattr(self.app_info, key)", "is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of the tool and its requirements.", "option and then fallback to global settings. \"\"\" if self.app_info", "_container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return a boolean indicating if the specified", "resolve(self, enabled_container_types, tool_info, **kwds): \"\"\"Find a container matching all supplied", "self.app_info = app_info self.resolver_kwds = kwds def _get_config_option(self, key, default=None):", "supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of the tool and", "self.resolver_kwds = kwds def _get_config_option(self, key, default=None): \"\"\"Look in resolver-specific", "key) else: return default @abstractmethod def resolve(self, enabled_container_types, tool_info, **kwds):", "default=None): \"\"\"Look in resolver-specific settings for option and then fallback", "= False def __init__(self, app_info=None, **kwds): \"\"\"Default initializer for ``ContainerResolver``", "( ABCMeta, abstractmethod, abstractproperty, ) import six from galaxy.util.dictifiable import", "for tool execution.\"\"\" from abc import ( ABCMeta, abstractmethod, abstractproperty,", "'can_uninstall_dependencies'] can_uninstall_dependencies = False def __init__(self, app_info=None, **kwds): \"\"\"Default initializer", "\"\"\"The module defines the abstract interface for resolving container images", "resolver-specific settings for option and then fallback to global settings.", "for option and then fallback to global settings. \"\"\" if", "requirements. \"\"\" @abstractproperty def resolver_type(self): \"\"\"Short label for the type", "def resolve(self, enabled_container_types, tool_info, **kwds): \"\"\"Find a container matching all", "requirements for tool. The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description", "@six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description of a technique for resolving", "fallback to global settings. 
\"\"\" if self.app_info and hasattr(self.app_info, key):", "<filename>lib/galaxy/tool_util/deps/container_resolvers/__init__.py<gh_stars>1-10 \"\"\"The module defines the abstract interface for resolving container", "tool execution.\"\"\" from abc import ( ABCMeta, abstractmethod, abstractproperty, )", "technique for resolving container images for tool execution.\"\"\" # Keys", "\"\"\"Look in resolver-specific settings for option and then fallback to", "ContainerResolver(Dictifiable): \"\"\"Description of a technique for resolving container images for", "dictification. dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies = False def __init__(self,", "abstractmethod, abstractproperty, ) import six from galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible", "type of container resolution.\"\"\" def _container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return a", "all supplied requirements for tool. The supplied argument is a", "its requirements. \"\"\" @abstractproperty def resolver_type(self): \"\"\"Short label for the", "hasattr(self.app_info, key): return getattr(self.app_info, key) else: return default @abstractmethod def", "container resolution.\"\"\" def _container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return a boolean indicating", "abc import ( ABCMeta, abstractmethod, abstractproperty, ) import six from", "import ( ABCMeta, abstractmethod, abstractproperty, ) import six from galaxy.util.dictifiable", "@six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description of a technique for resolving container", "dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies = False def __init__(self, app_info=None,", "\"\"\"Find a container matching all supplied requirements for tool. The", "abstract interface for resolving container images for tool execution.\"\"\" from", "module defines the abstract interface for resolving container images for", "False def __init__(self, app_info=None, **kwds): \"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\"", "initializer for ``ContainerResolver`` subclasses.\"\"\" self.app_info = app_info self.resolver_kwds = kwds", "of container resolution.\"\"\" def _container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return a boolean", "in resolver-specific settings for option and then fallback to global", "from galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description of", ") import six from galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class", "\"\"\"Short label for the type of container resolution.\"\"\" def _container_type_enabled(self,", "container type is enabled.\"\"\" return container_description.type in enabled_container_types def __str__(self):", "a container matching all supplied requirements for tool. 
The supplied", "execution.\"\"\" from abc import ( ABCMeta, abstractmethod, abstractproperty, ) import", "return container_description.type in enabled_container_types def __str__(self): return \"%s[]\" % self.__class__.__name__", "container_description, enabled_container_types): \"\"\"Return a boolean indicating if the specified container", "defines the abstract interface for resolving container images for tool", "from abc import ( ABCMeta, abstractmethod, abstractproperty, ) import six", "The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of the tool", "def __init__(self, app_info=None, **kwds): \"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\" self.app_info", "default @abstractmethod def resolve(self, enabled_container_types, tool_info, **kwds): \"\"\"Find a container", "@abstractmethod def resolve(self, enabled_container_types, tool_info, **kwds): \"\"\"Find a container matching", "container images for tool execution.\"\"\" from abc import ( ABCMeta,", "= app_info self.resolver_kwds = kwds def _get_config_option(self, key, default=None): \"\"\"Look", "the specified container type is enabled.\"\"\" return container_description.type in enabled_container_types", "a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of the tool and its requirements. \"\"\"", "and hasattr(self.app_info, key): return getattr(self.app_info, key) else: return default @abstractmethod", "enabled_container_types): \"\"\"Return a boolean indicating if the specified container type", "the abstract interface for resolving container images for tool execution.\"\"\"", "a technique for resolving container images for tool execution.\"\"\" #", "the type of container resolution.\"\"\" def _container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return", "six from galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description", "enabled.\"\"\" return container_description.type in enabled_container_types def __str__(self): return \"%s[]\" %", "tool_info, **kwds): \"\"\"Find a container matching all supplied requirements for", "resolving container images for tool execution.\"\"\" # Keys for dictification.", "boolean indicating if the specified container type is enabled.\"\"\" return", "# Keys for dictification. dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies =", "for tool execution.\"\"\" # Keys for dictification. dict_collection_visible_keys = ['resolver_type',", "@abstractproperty def resolver_type(self): \"\"\"Short label for the type of container", "['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies = False def __init__(self, app_info=None, **kwds): \"\"\"Default", "\"\"\"Return a boolean indicating if the specified container type is", "can_uninstall_dependencies = False def __init__(self, app_info=None, **kwds): \"\"\"Default initializer for", "and its requirements. \"\"\" @abstractproperty def resolver_type(self): \"\"\"Short label for", "key, default=None): \"\"\"Look in resolver-specific settings for option and then", "description of the tool and its requirements. \"\"\" @abstractproperty def", "kwds def _get_config_option(self, key, default=None): \"\"\"Look in resolver-specific settings for", "the tool and its requirements. \"\"\" @abstractproperty def resolver_type(self): \"\"\"Short", "tool. 
The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of the", "container images for tool execution.\"\"\" # Keys for dictification. dict_collection_visible_keys", "_get_config_option(self, key, default=None): \"\"\"Look in resolver-specific settings for option and", "import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description of a technique", "and then fallback to global settings. \"\"\" if self.app_info and", "container matching all supplied requirements for tool. The supplied argument", "tool and its requirements. \"\"\" @abstractproperty def resolver_type(self): \"\"\"Short label", "to global settings. \"\"\" if self.app_info and hasattr(self.app_info, key): return", "resolving container images for tool execution.\"\"\" from abc import (", "``ContainerResolver`` subclasses.\"\"\" self.app_info = app_info self.resolver_kwds = kwds def _get_config_option(self,", "def _container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return a boolean indicating if the", "ABCMeta, abstractmethod, abstractproperty, ) import six from galaxy.util.dictifiable import Dictifiable", "then fallback to global settings. \"\"\" if self.app_info and hasattr(self.app_info,", "for tool. The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description of", "if the specified container type is enabled.\"\"\" return container_description.type in", "= ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies = False def __init__(self, app_info=None, **kwds):", "if self.app_info and hasattr(self.app_info, key): return getattr(self.app_info, key) else: return", "resolution.\"\"\" def _container_type_enabled(self, container_description, enabled_container_types): \"\"\"Return a boolean indicating if", "a boolean indicating if the specified container type is enabled.\"\"\"", "tool execution.\"\"\" # Keys for dictification. dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies']", "self.app_info and hasattr(self.app_info, key): return getattr(self.app_info, key) else: return default", "enabled_container_types, tool_info, **kwds): \"\"\"Find a container matching all supplied requirements", "resolver_type(self): \"\"\"Short label for the type of container resolution.\"\"\" def", "def resolver_type(self): \"\"\"Short label for the type of container resolution.\"\"\"", "class ContainerResolver(Dictifiable): \"\"\"Description of a technique for resolving container images", "\"\"\" @abstractproperty def resolver_type(self): \"\"\"Short label for the type of", ":class:`galaxy.tool_util.deps.containers.ToolInfo` description of the tool and its requirements. \"\"\" @abstractproperty", "__init__(self, app_info=None, **kwds): \"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\" self.app_info =", "for dictification. dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies = False def", "global settings. 
\"\"\" if self.app_info and hasattr(self.app_info, key): return getattr(self.app_info,", "Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description of a technique for", "\"\"\" if self.app_info and hasattr(self.app_info, key): return getattr(self.app_info, key) else:", "indicating if the specified container type is enabled.\"\"\" return container_description.type", "\"\"\"Description of a technique for resolving container images for tool", "type is enabled.\"\"\" return container_description.type in enabled_container_types def __str__(self): return", "for ``ContainerResolver`` subclasses.\"\"\" self.app_info = app_info self.resolver_kwds = kwds def", "= kwds def _get_config_option(self, key, default=None): \"\"\"Look in resolver-specific settings", "for resolving container images for tool execution.\"\"\" # Keys for", "specified container type is enabled.\"\"\" return container_description.type in enabled_container_types def", "app_info self.resolver_kwds = kwds def _get_config_option(self, key, default=None): \"\"\"Look in", "else: return default @abstractmethod def resolve(self, enabled_container_types, tool_info, **kwds): \"\"\"Find", "abstractproperty, ) import six from galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta)", "interface for resolving container images for tool execution.\"\"\" from abc", "execution.\"\"\" # Keys for dictification. dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies'] can_uninstall_dependencies", "for resolving container images for tool execution.\"\"\" from abc import", "of the tool and its requirements. \"\"\" @abstractproperty def resolver_type(self):", "label for the type of container resolution.\"\"\" def _container_type_enabled(self, container_description,", "images for tool execution.\"\"\" # Keys for dictification. dict_collection_visible_keys =", "is enabled.\"\"\" return container_description.type in enabled_container_types def __str__(self): return \"%s[]\"", "settings for option and then fallback to global settings. \"\"\"", "app_info=None, **kwds): \"\"\"Default initializer for ``ContainerResolver`` subclasses.\"\"\" self.app_info = app_info", "return getattr(self.app_info, key) else: return default @abstractmethod def resolve(self, enabled_container_types,", "return default @abstractmethod def resolve(self, enabled_container_types, tool_info, **kwds): \"\"\"Find a", "import six from galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable):", "galaxy.util.dictifiable import Dictifiable @six.python_2_unicode_compatible @six.add_metaclass(ABCMeta) class ContainerResolver(Dictifiable): \"\"\"Description of a", "of a technique for resolving container images for tool execution.\"\"\"", "supplied requirements for tool. The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo`", "**kwds): \"\"\"Find a container matching all supplied requirements for tool." ]
[ "file_names: df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft',", "python3 import glob import os import pandas as pd import", "df['d'] = (df['dl'] + df['dr']) / 2 # start with", "to ms df['t'] = (df['t'] - df['t'].min()) / 1000 df", "= glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename, files)) for file_name in file_names:", "blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x']", "pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns", "x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x'] / 1920 df['y'] = df['y']", "(df['dl'] + df['dr']) / 2 # start with t=0, and", "<reponame>nirdslab/streaminghub #!/usr/bin/env python3 import glob import os import pandas as", "import os import pandas as pd import dfs SRC_DIR =", "= (df['dl'] + df['dr']) / 2 # start with t=0,", "set unit to ms df['t'] = (df['t'] - df['t'].min()) /", "glob import os import pandas as pd import dfs SRC_DIR", "df['y'] / 1080 df['d'] = (df['dl'] + df['dr']) / 2", "if __name__ == '__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename,", "== '__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename, files)) for", "'PupilRight']].reset_index() df.columns = ['t', 'x', 'y', 'dl', 'dr'] # fill", "t=0, and set unit to ms df['t'] = (df['t'] -", "'__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename, files)) for file_name", "file_name in file_names: df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY", "df['x'] / 1920 df['y'] = df['y'] / 1080 df['d'] =", "ms df['t'] = (df['t'] - df['t'].min()) / 1000 df =", "- df['t'].min()) / 1000 df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')", "f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ == '__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names =", "# start with t=0, and set unit to ms df['t']", "import dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if __name__", "import glob import os import pandas as pd import dfs", "+ df['dr']) / 2 # start with t=0, and set", "__name__ == '__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename, files))", "f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ == '__main__': files =", "#!/usr/bin/env python3 import glob import os import pandas as pd", "start with t=0, and set unit to ms df['t'] =", "df['y'] = df['y'] / 1080 df['d'] = (df['dl'] + df['dr'])", "= ['t', 'x', 'y', 'dl', 'dr'] # fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill", "in file_names: df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)',", "fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] =", "'x', 'y', 'dl', 'dr'] # fill blanks 
(order=interpolate(inter)->bfill+ffill(edges))->zerofill df =", "= f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ == '__main__': files", "/ 1000 df = df[['t', 'x', 'y', 'd']].round(6).set_index('t') df.to_csv(f'{OUT_DIR}/{file_name}') print(f'Processed:", "/ 1080 df['d'] = (df['dl'] + df['dr']) / 2 #", "df['dr']) / 2 # start with t=0, and set unit", "files)) for file_name in file_names: df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX", "OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ == '__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\")", "(ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns = ['t', 'x', 'y', 'dl', 'dr']", "# fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x']", "/ 2 # start with t=0, and set unit to", "import pandas as pd import dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR", "'y', 'dl', 'dr'] # fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda", "df = df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x'] / 1920", "df['t'] = (df['t'] - df['t'].min()) / 1000 df = df[['t',", "'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns = ['t', 'x', 'y', 'dl',", "list(map(os.path.basename, files)) for file_name in file_names: df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[", "(ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns = ['t', 'x', 'y',", "as pd import dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\"", "file_names = list(map(os.path.basename, files)) for file_name in file_names: df: pd.DataFrame", "unit to ms df['t'] = (df['t'] - df['t'].min()) / 1000", "os import pandas as pd import dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\"", "/ 1920 df['y'] = df['y'] / 1080 df['d'] = (df['dl']", "1080 df['d'] = (df['dl'] + df['dr']) / 2 # start", "= f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ == '__main__': files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names", "files = glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename, files)) for file_name in", "'dl', 'dr'] # fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda x:", "with t=0, and set unit to ms df['t'] = (df['t']", "['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns = ['t', 'x',", "x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x'] / 1920 df['y'] = df['y'] /", "= df['x'] / 1920 df['y'] = df['y'] / 1080 df['d']", "df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x'] / 1920 df['y'] =", "'dr'] # fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0)", "'PupilLeft', 'PupilRight']].reset_index() df.columns = ['t', 'x', 'y', 'dl', 'dr'] #", "['t', 'x', 'y', 'dl', 'dr'] # fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill df", "dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ ==", "SRC_DIR = 
f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if __name__ == '__main__':", "= list(map(os.path.basename, files)) for file_name in file_names: df: pd.DataFrame =", "df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()", "df.columns = ['t', 'x', 'y', 'dl', 'dr'] # fill blanks", "glob.glob(f\"{SRC_DIR}/*.csv\") file_names = list(map(os.path.basename, files)) for file_name in file_names: df:", "and set unit to ms df['t'] = (df['t'] - df['t'].min())", "2 # start with t=0, and set unit to ms", "df['t'].min()) / 1000 df = df[['t', 'x', 'y', 'd']].round(6).set_index('t') df.to_csv(f'{OUT_DIR}/{file_name}')", "= (df['t'] - df['t'].min()) / 1000 df = df[['t', 'x',", "1920 df['y'] = df['y'] / 1080 df['d'] = (df['dl'] +", "= df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x'] / 1920 df['y']", "(df['t'] - df['t'].min()) / 1000 df = df[['t', 'x', 'y',", "pd import dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR = f\"{dfs.get_data_dir()}/adhd_sin\" if", "for file_name in file_names: df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)',", "1000 df = df[['t', 'x', 'y', 'd']].round(6).set_index('t') df.to_csv(f'{OUT_DIR}/{file_name}') print(f'Processed: {file_name}')", "df['x'] = df['x'] / 1920 df['y'] = df['y'] / 1080", "pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns = ['t',", "= df['y'] / 1080 df['d'] = (df['dl'] + df['dr']) /", "(order=interpolate(inter)->bfill+ffill(edges))->zerofill df = df.apply(lambda x: x.interpolate().fillna(method=\"bfill\").fillna(method=\"ffill\")).fillna(0) df['x'] = df['x'] /", "pandas as pd import dfs SRC_DIR = f\"{dfs.get_data_dir()}/adhd_sin_orig\" OUT_DIR =", "= pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[ ['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index() df.columns =" ]
[ "and ('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss'])", "DIRECTORIES containing non-hidden files ending in FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"):", "#if we get to this point in function, it means", "import pandas as pd import numpy as np import matplotlib.pyplot", "else: print(\"Missing a column in NN datafile\") raise Exception('NN datafile", "of datapoints in the vectors. if x[0] > xsize: return", "z=z[int(result-1)::int(result)] if w is None: return x,y,z else: return x,y", "the accuracy and loss files are stored. want to move", "data files in DIRECTORY with extension EXT def getDataFiles(DIRECTORY, EXT='txt'):", "by grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for item in os.scandir(SEARCHDIR): name=str(item.name)", "same format expected by grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for item", "createFolders(SEARCHDIR, SAVEDIR): for item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if", "at FILENAME def grabNNData(FILENAME, header='infer', sep=' '): data = pd.read_csv(FILENAME,", "often, but not more often. We verify that we're not", "of the expected columns: epochs trainLoss valLoss valAcc [optional extra", "it means z and w are both not None. w=w[int(result-1)::int(result)]", "xsize: return x,y,z,w else: result=(1.0/x[0])*xsize #result is how often we", "i, minLoss #dirpath is where the accuracy and loss files", "and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc'", "loss files are stored. want to move the files into", "trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers) elif ('epochs' in data.columns)", "between the if statement & our attempt at making directory", "& our attempt at making directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a", "we should take datapoints if we wish to consider values", "np import matplotlib.pyplot as plt import os import matplotlib.pyplot as", "one of the expected columns: epochs trainLoss valLoss valAcc [optional", "ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss,", "in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR)", "item.name.startswith(\".\"): directories.append(directory.path) return directories #get all non-hidden data files in", "in range(0,loss.size): if loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin)", "batch_size, learning_rate, convKers) elif ('epochs' in data.columns) and ('trainLoss' in", "if item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"): datafiles.append(item.path) return datafiles #checking if", "E=0.5, which we use by default def sliceData(xsize, x, y,", "how often we should take datapoints if we wish to", "as pd import numpy as np import matplotlib.pyplot as plt", "at making directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function to read", "('valAcc' in data.columns) and ('batch_size' in data.columns) and 
('learning_rate' in", "FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for directory in os.scandir(DIRECTORY): for", "shutil #find all DIRECTORIES containing non-hidden files ending in FILENAME", "files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory", "if z is not None: z=z[int(result-1)::int(result)] if w is None:", "x, y, z=None, w=None): #we can slice the data to", "in data.columns) and ('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs'])", "wish to consider values every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z", "print(\"Missing a column in NN datafile\") raise Exception('NN datafile is", "as np import matplotlib.pyplot as plt import os import matplotlib.pyplot", "numpy as np import matplotlib.pyplot as plt import os import", "the vectors. if x[0] > xsize: return x,y,z,w else: result=(1.0/x[0])*xsize", "not more often. We verify that we're not being asked", "that we're not being asked for a granularity that is", "valLoss valAcc [optional extra columns: batch_size, learning_rate]') #slice data could", "for numEpochs epochs in a row. def stopsDecreasing(loss, epoch, numEpochs):", "in DIRECTORY with extension EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for", "return x,y,z else: return x,y #if we get to this", "data.columns) and ('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss'])", "('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in", "[optional extra columns: batch_size, learning_rate]') #slice data could be used", "not being asked for a granularity that is smaller than", "being asked for a granularity that is smaller than the", "def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0 for i in range(0,loss.size):", "the if statement & our attempt at making directory pass", "not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory already exists--must have", "getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and", "return directories #get all non-hidden data files in DIRECTORY with", "data = pd.read_csv(FILENAME, sep, header=header) if ('epochs' in data.columns) and", "y, z=None, w=None): #we can slice the data to sample", "data could be used to test values of E other", "data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing", "raise Exception('NN datafile is missing one of the expected columns:", "epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a column in NN", "item in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"): datafiles.append(item.path) return", "> xsize: return x,y,z,w else: result=(1.0/x[0])*xsize #result is how often", "SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory 
already", "header='infer', sep=' '): data = pd.read_csv(FILENAME, sep, header=header) if ('epochs'", "None: z=z[int(result-1)::int(result)] if w is None: return x,y,z else: return", "could be used to test values of E other than", "EXT='txt'): datafiles=[] for item in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and not", "x[0] > xsize: return x,y,z,w else: result=(1.0/x[0])*xsize #result is how", "None: return x,y,z else: return x,y #if we get to", "directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function to read in information", "learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers) elif", "in FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for directory in os.scandir(DIRECTORY):", "return i, minLoss #dirpath is where the accuracy and loss", "plt import CurveFit import shutil #find all DIRECTORIES containing non-hidden", "as plt import os import matplotlib.pyplot as plt import CurveFit", "return datafiles #checking if loss ever doesn't decrease for numEpochs", "x,y,z,w else: result=(1.0/x[0])*xsize #result is how often we should take", "item.name.endswith(FILENAME) and not item.name.startswith(\".\"): directories.append(directory.path) return directories #get all non-hidden", "if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss'", "directories.append(directory.path) return directories #get all non-hidden data files in DIRECTORY", "should take datapoints if we wish to consider values every", "data to sample less often, but not more often. We", "x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z is not None: z=z[int(result-1)::int(result)] if w", "by default def sliceData(xsize, x, y, z=None, w=None): #we can", "is smaller than the frequency of datapoints in the vectors.", "files into the same format expected by grabNNData. def createFolders(SEARCHDIR,", "the frequency of datapoints in the vectors. if x[0] >", "pd import numpy as np import matplotlib.pyplot as plt import", "plt import os import matplotlib.pyplot as plt import CurveFit import", "pd.read_csv(FILENAME, sep, header=header) if ('epochs' in data.columns) and ('trainLoss' in", "and not item.name.startswith(\".\"): datafiles.append(item.path) return datafiles #checking if loss ever", "#a function to read in information (e.g. accuracy, loss) stored", "valLoss, valAcc, batch_size, learning_rate, convKers) elif ('epochs' in data.columns) and", "DIRECTORY with extension EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item", "every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z is not None: z=z[int(result-1)::int(result)]", "which we use by default def sliceData(xsize, x, y, z=None,", "directory in os.scandir(DIRECTORY): for item in os.scandir(directory): if item.name.endswith(FILENAME) and", "z and w are both not None. 
w=w[int(result-1)::int(result)] return x,y,z,w", "to move the files into the same format expected by", "often we should take datapoints if we wish to consider", "numEpochs): minLoss=np.inf epochMin=0 for i in range(0,loss.size): if loss[i] <", "data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\",", "numEpochs: return i, minLoss return i, minLoss #dirpath is where", "and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size'", "to this point in function, it means z and w", "data.columns) and ('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss'])", "to read in information (e.g. accuracy, loss) stored at FILENAME", "if we wish to consider values every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)]", "minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs: return i, minLoss return", "x,y,z else: return x,y #if we get to this point", "smaller than the frequency of datapoints in the vectors. if", "def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for directory in os.scandir(DIRECTORY): for item", "for item in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"): datafiles.append(item.path)", "the data to sample less often, but not more often.", "('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in", "often. We verify that we're not being asked for a", "ending in FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for directory in", "pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function to read in information (e.g.", "we get to this point in function, it means z", "loss ever doesn't decrease for numEpochs epochs in a row.", "#checking if loss ever doesn't decrease for numEpochs epochs in", "consider values every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z is not", "#slice data could be used to test values of E", "with extension EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item in", "os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory already exists--must have been", "grabNNData(FILENAME, header='infer', sep=' '): data = pd.read_csv(FILENAME, sep, header=header) if", "already exists--must have been created between the if statement &", "in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns)", "in information (e.g. 
accuracy, loss) stored at FILENAME def grabNNData(FILENAME,", "stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0 for i in range(0,loss.size): if", "in os.scandir(DIRECTORY): for item in os.scandir(directory): if item.name.endswith(FILENAME) and not", "granularity that is smaller than the frequency of datapoints in", "if loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs:", "#dirpath is where the accuracy and loss files are stored.", "def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT)", "take datapoints if we wish to consider values every xsize", "('valLoss' in data.columns) and ('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True)", "We verify that we're not being asked for a granularity", "datapoints if we wish to consider values every xsize x=x[int(result-1)::int(result)]", "os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except", "not None: z=z[int(result-1)::int(result)] if w is None: return x,y,z else:", "datafile is missing one of the expected columns: epochs trainLoss", "valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate,", "and ('batch_size' in data.columns) and ('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0,", "that is smaller than the frequency of datapoints in the", "header=header) if ('epochs' in data.columns) and ('trainLoss' in data.columns) and", "return x,y #if we get to this point in function,", "files in DIRECTORY with extension EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[]", "import matplotlib.pyplot as plt import CurveFit import shutil #find all", "z is not None: z=z[int(result-1)::int(result)] if w is None: return", "is not None: z=z[int(result-1)::int(result)] if w is None: return x,y,z", "not item.name.startswith(\".\"): directories.append(directory.path) return directories #get all non-hidden data files", "all non-hidden data files in DIRECTORY with extension EXT def", "data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate'])", "accuracy, loss) stored at FILENAME def grabNNData(FILENAME, header='infer', sep=' '):", "but not more often. We verify that we're not being", "for directory in os.scandir(DIRECTORY): for item in os.scandir(directory): if item.name.endswith(FILENAME)", "#find all DIRECTORIES containing non-hidden files ending in FILENAME def", "epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]') #slice", "shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function to read in information (e.g. accuracy,", "('batch_size' in data.columns) and ('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True)", "function to read in information (e.g. 
accuracy, loss) stored at", "to test values of E other than E=0.5, which we", "other than E=0.5, which we use by default def sliceData(xsize,", "we wish to consider values every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if", "else: result=(1.0/x[0])*xsize #result is how often we should take datapoints", "item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try:", "directories=[] for directory in os.scandir(DIRECTORY): for item in os.scandir(directory): if", "learning_rate]') #slice data could be used to test values of", "get to this point in function, it means z and", "and ('valLoss' in data.columns) and ('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0,", "directories #get all non-hidden data files in DIRECTORY with extension", "and ('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss'])", "else: return x,y #if we get to this point in", "accuracy and loss files are stored. want to move the", "stored. want to move the files into the same format", "point in function, it means z and w are both", "EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item in os.scandir(DIRECTORY): if", "convKers) elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and", "statement & our attempt at making directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1]))", "a column in NN datafile\") raise Exception('NN datafile is missing", "stored at FILENAME def grabNNData(FILENAME, header='infer', sep=' '): data =", "valAcc, batch_size, learning_rate, convKers) elif ('epochs' in data.columns) and ('trainLoss'", "import os import matplotlib.pyplot as plt import CurveFit import shutil", "more often. We verify that we're not being asked for", "return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers) elif ('epochs' in", "valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a column in NN datafile\") raise", "function, it means z and w are both not None.", "in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):", "datafile\") raise Exception('NN datafile is missing one of the expected", "loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs: return", "is where the accuracy and loss files are stored. want", "is None: return x,y,z else: return x,y #if we get", "trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss, valAcc,", "i in range(0,loss.size): if loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i] elif", "ever doesn't decrease for numEpochs epochs in a row. 
def", "the expected columns: epochs trainLoss valLoss valAcc [optional extra columns:", "batch_size, learning_rate]') #slice data could be used to test values", "xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z is not None: z=z[int(result-1)::int(result)] if", "axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch,", "than the frequency of datapoints in the vectors. if x[0]", "we're not being asked for a granularity that is smaller", "sep, header=header) if ('epochs' in data.columns) and ('trainLoss' in data.columns)", "decrease for numEpochs epochs in a row. def stopsDecreasing(loss, epoch,", "and not item.name.startswith(\".\"): directories.append(directory.path) return directories #get all non-hidden data", "in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else:", "making directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function to read in", "epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs: return i, minLoss return i,", "into the same format expected by grabNNData. def createFolders(SEARCHDIR, SAVEDIR):", "#we can slice the data to sample less often, but", "if w is None: return x,y,z else: return x,y #if", "os.makedirs(SAVEFULLDIR) except FileExistsError: #directory already exists--must have been created between", "learning_rate, convKers) elif ('epochs' in data.columns) and ('trainLoss' in data.columns)", "batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)", "is how often we should take datapoints if we wish", "x,y #if we get to this point in function, it", "#get all non-hidden data files in DIRECTORY with extension EXT", "os import matplotlib.pyplot as plt import CurveFit import shutil #find", "valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a column in NN datafile\") raise Exception('NN", "values of E other than E=0.5, which we use by", "default def sliceData(xsize, x, y, z=None, w=None): #we can slice", "w=None): #we can slice the data to sample less often,", "pandas as pd import numpy as np import matplotlib.pyplot as", "('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in", "i, minLoss return i, minLoss #dirpath is where the accuracy", "in os.scandir(directory): if item.name.endswith(FILENAME) and not item.name.startswith(\".\"): directories.append(directory.path) return directories", "import shutil #find all DIRECTORIES containing non-hidden files ending in", "extension EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item in os.scandir(DIRECTORY):", "in data.columns) and ('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs'])", "for a granularity that is smaller than the frequency of", "CurveFit import shutil #find all DIRECTORIES containing non-hidden files ending", "matplotlib.pyplot as plt import CurveFit import shutil #find all DIRECTORIES", 
"axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a column", "sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels'])", "be used to test values of E other than E=0.5,", "return x,y,z,w else: result=(1.0/x[0])*xsize #result is how often we should", "this point in function, it means z and w are", "a row. def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0 for i", "in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):", "vectors. if x[0] > xsize: return x,y,z,w else: result=(1.0/x[0])*xsize #result", "where the accuracy and loss files are stored. want to", "def sliceData(xsize, x, y, z=None, w=None): #we can slice the", "FileExistsError: #directory already exists--must have been created between the if", "move the files into the same format expected by grabNNData.", "if statement & our attempt at making directory pass shutil.move(item.path,", "convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers) elif ('epochs'", "as plt import CurveFit import shutil #find all DIRECTORIES containing", "epochs in a row. def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0", "matplotlib.pyplot as plt import os import matplotlib.pyplot as plt import", "and loss files are stored. want to move the files", "grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-')", "datafiles #checking if loss ever doesn't decrease for numEpochs epochs", "slice the data to sample less often, but not more", "if loss ever doesn't decrease for numEpochs epochs in a", "our attempt at making directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function", "verify that we're not being asked for a granularity that", "numEpochs epochs in a row. def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf", "row. def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0 for i in", "range(0,loss.size): if loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >=", "NN datafile\") raise Exception('NN datafile is missing one of the", "w is None: return x,y,z else: return x,y #if we", "data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\",", "not item.name.startswith(\".\"): datafiles.append(item.path) return datafiles #checking if loss ever doesn't", "epoch, numEpochs): minLoss=np.inf epochMin=0 for i in range(0,loss.size): if loss[i]", "in a row. def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0 for", "files are stored. want to move the files into the", "non-hidden data files in DIRECTORY with extension EXT def getDataFiles(DIRECTORY,", "datafiles=[] for item in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"):", "import matplotlib.pyplot as plt import os import matplotlib.pyplot as plt", "information (e.g. 
accuracy, loss) stored at FILENAME def grabNNData(FILENAME, header='infer',", "elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss'", "valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss, valAcc, batch_size,", "values every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z is not None:", "have been created between the if statement & our attempt", "#result is how often we should take datapoints if we", "sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a", "Exception('NN datafile is missing one of the expected columns: epochs", "in the vectors. if x[0] > xsize: return x,y,z,w else:", "trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a column in NN datafile\")", "non-hidden files ending in FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for", "#directory already exists--must have been created between the if statement", "return i, minLoss return i, minLoss #dirpath is where the", "less often, but not more often. We verify that we're", "containing non-hidden files ending in FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[]", "use by default def sliceData(xsize, x, y, z=None, w=None): #we", "doesn't decrease for numEpochs epochs in a row. def stopsDecreasing(loss,", "are stored. want to move the files into the same", "except FileExistsError: #directory already exists--must have been created between the", "name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError:", "E other than E=0.5, which we use by default def", "try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory already exists--must have been created", "datapoints in the vectors. if x[0] > xsize: return x,y,z,w", "test values of E other than E=0.5, which we use", "FILENAME=\"valLoss.txt\"): directories=[] for directory in os.scandir(DIRECTORY): for item in os.scandir(directory):", "if item.name.endswith(FILENAME) and not item.name.startswith(\".\"): directories.append(directory.path) return directories #get all", "elif (epoch[i]-epochMin) >= numEpochs: return i, minLoss return i, minLoss", "the files into the same format expected by grabNNData. def", "read in information (e.g. 
accuracy, loss) stored at FILENAME def", "columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')", "os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"): datafiles.append(item.path) return datafiles #checking", "if x[0] > xsize: return x,y,z,w else: result=(1.0/x[0])*xsize #result is", "in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size'])", "been created between the if statement & our attempt at", "getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for directory in os.scandir(DIRECTORY): for item in", "minLoss #dirpath is where the accuracy and loss files are", "('valAcc' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc'])", "SAVEFULLDIR+\"/\"+str(files[1])) #a function to read in information (e.g. accuracy, loss)", "attempt at making directory pass shutil.move(item.path, SAVEFULLDIR+\"/\"+str(files[1])) #a function to", "item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"): datafiles.append(item.path) return datafiles #checking if loss", "minLoss return i, minLoss #dirpath is where the accuracy and", "minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs: return i, minLoss", "datafiles.append(item.path) return datafiles #checking if loss ever doesn't decrease for", "a granularity that is smaller than the frequency of datapoints", "we use by default def sliceData(xsize, x, y, z=None, w=None):", "os.scandir(directory): if item.name.endswith(FILENAME) and not item.name.startswith(\".\"): directories.append(directory.path) return directories #get", "valAcc [optional extra columns: batch_size, learning_rate]') #slice data could be", "in function, it means z and w are both not", ">= numEpochs: return i, minLoss return i, minLoss #dirpath is", "created between the if statement & our attempt at making", "missing one of the expected columns: epochs trainLoss valLoss valAcc", "to consider values every xsize x=x[int(result-1)::int(result)] y=y[int(result-1)::int(result)] if z is", "means z and w are both not None. w=w[int(result-1)::int(result)] return", "for item in os.scandir(directory): if item.name.endswith(FILENAME) and not item.name.startswith(\".\"): directories.append(directory.path)", "trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]') #slice data", "minLoss=np.inf epochMin=0 for i in range(0,loss.size): if loss[i] < minLoss:", "y=y[int(result-1)::int(result)] if z is not None: z=z[int(result-1)::int(result)] if w is", "def createFolders(SEARCHDIR, SAVEDIR): for item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0])", "format expected by grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for item in", "used to test values of E other than E=0.5, which", "expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size,", "exists--must have been created between the if statement & our", "(e.g. 
accuracy, loss) stored at FILENAME def grabNNData(FILENAME, header='infer', sep='", "(epoch[i]-epochMin) >= numEpochs: return i, minLoss return i, minLoss #dirpath", "SAVEDIR): for item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not", "data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and", "data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and", "is missing one of the expected columns: epochs trainLoss valLoss", "loss) stored at FILENAME def grabNNData(FILENAME, header='infer', sep=' '): data", "< minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs: return i,", "item in os.scandir(directory): if item.name.endswith(FILENAME) and not item.name.startswith(\".\"): directories.append(directory.path) return", "def grabNNData(FILENAME, header='infer', sep=' '): data = pd.read_csv(FILENAME, sep, header=header)", "'): data = pd.read_csv(FILENAME, sep, header=header) if ('epochs' in data.columns)", "if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory already exists--must", "import CurveFit import shutil #find all DIRECTORIES containing non-hidden files", "asked for a granularity that is smaller than the frequency", "in os.scandir(DIRECTORY): if item.name.endswith(\".\"+EXT) and not item.name.startswith(\".\"): datafiles.append(item.path) return datafiles", "sliceData(xsize, x, y, z=None, w=None): #we can slice the data", "import numpy as np import matplotlib.pyplot as plt import os", "sample less often, but not more often. We verify that", "data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and", "('learning_rate' in data.columns): sortedData=data.sort_values(by=\"epochs\", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc'])", "ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) else: print(\"Missing a column in", "of E other than E=0.5, which we use by default", "column in NN datafile\") raise Exception('NN datafile is missing one", "z=None, w=None): #we can slice the data to sample less", "extra columns: batch_size, learning_rate]') #slice data could be used to", "all DIRECTORIES containing non-hidden files ending in FILENAME def getDataDirectories(DIRECTORY,", "columns: batch_size, learning_rate]') #slice data could be used to test", "epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=np.array(sortedData['batch_size']) learning_rate=np.array(sortedData['learning_rate']) convKers=np.array(sortedData['convKernels']) return(epoch, trainLoss, valLoss,", "for item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR):", "to sample less often, but not more often. We verify", "in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns)", "os.scandir(DIRECTORY): for item in os.scandir(directory): if item.name.endswith(FILENAME) and not item.name.startswith(\".\"):", "than E=0.5, which we use by default def sliceData(xsize, x,", "frequency of datapoints in the vectors. 
if x[0] > xsize:", "and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate'", "sep=' '): data = pd.read_csv(FILENAME, sep, header=header) if ('epochs' in", "in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns)", "= pd.read_csv(FILENAME, sep, header=header) if ('epochs' in data.columns) and ('trainLoss'", "FILENAME def grabNNData(FILENAME, header='infer', sep=' '): data = pd.read_csv(FILENAME, sep,", "in NN datafile\") raise Exception('NN datafile is missing one of", "the same format expected by grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for", "files ending in FILENAME def getDataDirectories(DIRECTORY, FILENAME=\"valLoss.txt\"): directories=[] for directory", "epochMin=0 for i in range(0,loss.size): if loss[i] < minLoss: minLoss=loss[i]", "can slice the data to sample less often, but not", "expected by grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for item in os.scandir(SEARCHDIR):", "want to move the files into the same format expected", "for i in range(0,loss.size): if loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i]", "result=(1.0/x[0])*xsize #result is how often we should take datapoints if", "item.name.startswith(\".\"): datafiles.append(item.path) return datafiles #checking if loss ever doesn't decrease" ]
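A small synthetic check of stopsDecreasing, runnable on its own with numpy: the loss stops improving after the third epoch, so with a patience of two epochs the function returns at the first index where two epochs have passed without a new minimum.

import numpy as np

loss = np.array([1.0, 0.8, 0.6, 0.6, 0.6, 0.6])
epoch = np.array([1, 2, 3, 4, 5, 6])
# minimum of 0.6 is reached at epoch 3; by epoch 5 (index 4) it has
# failed to improve for numEpochs=2 epochs, so the function stops there.
i, minLoss = stopsDecreasing(loss, epoch, numEpochs=2)
print(i, minLoss)  # 4 0.6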
[ "char in dic: stack.append(char) elif stack and dic.get(stack[-1])!=char: return False", "in dic: stack.append(char) elif stack and dic.get(stack[-1])!=char: return False else:", "elif stack and dic.get(stack[-1])!=char: return False else: stack.pop() continue return", "[] dic = {'{':'}','[':']','(':')'} for char in s: if not", "= {'{':'}','[':']','(':')'} for char in s: if not stack or", "def isValid(self , s ): # write code here if", "题意:给出一个仅包含字符'(',')','{','}','['和']',的字符串,判断给出的字符串是否是合法的括号序列。括号必须以正确的顺序关闭,\"()\"和\"()[]{}\"都是合法的括号序列,但\"(]\"和\"([)]\"不合法。 # @param s string字符串 # @return bool布尔型 # class", "stack or char in dic: stack.append(char) elif stack and dic.get(stack[-1])!=char:", "code here if not s: return True stack = []", "char in s: if not stack or char in dic:", "not s: return True stack = [] dic = {'{':'}','[':']','(':')'}", "= [] dic = {'{':'}','[':']','(':')'} for char in s: if", "write code here if not s: return True stack =", "<filename>algo_probs/newcoder/classic/nc52.py # 题意:给出一个仅包含字符'(',')','{','}','['和']',的字符串,判断给出的字符串是否是合法的括号序列。括号必须以正确的顺序关闭,\"()\"和\"()[]{}\"都是合法的括号序列,但\"(]\"和\"([)]\"不合法。 # @param s string字符串 # @return bool布尔型", "# class Solution: def isValid(self , s ): # write", "for char in s: if not stack or char in", "s string字符串 # @return bool布尔型 # class Solution: def isValid(self", "@return bool布尔型 # class Solution: def isValid(self , s ):", "stack and dic.get(stack[-1])!=char: return False else: stack.pop() continue return True", "s: return True stack = [] dic = {'{':'}','[':']','(':')'} for", "return True stack = [] dic = {'{':'}','[':']','(':')'} for char", "dic = {'{':'}','[':']','(':')'} for char in s: if not stack", "stack.append(char) elif stack and dic.get(stack[-1])!=char: return False else: stack.pop() continue", "in s: if not stack or char in dic: stack.append(char)", "# write code here if not s: return True stack", "True stack = [] dic = {'{':'}','[':']','(':')'} for char in", "Solution: def isValid(self , s ): # write code here", "string字符串 # @return bool布尔型 # class Solution: def isValid(self ,", "or char in dic: stack.append(char) elif stack and dic.get(stack[-1])!=char: return", "not stack or char in dic: stack.append(char) elif stack and", ", s ): # write code here if not s:", "class Solution: def isValid(self , s ): # write code", "if not s: return True stack = [] dic =", "isValid(self , s ): # write code here if not", "bool布尔型 # class Solution: def isValid(self , s ): #", "@param s string字符串 # @return bool布尔型 # class Solution: def", "s ): # write code here if not s: return", "s: if not stack or char in dic: stack.append(char) elif", "dic: stack.append(char) elif stack and dic.get(stack[-1])!=char: return False else: stack.pop()", "{'{':'}','[':']','(':')'} for char in s: if not stack or char", "stack = [] dic = {'{':'}','[':']','(':')'} for char in s:", "here if not s: return True stack = [] dic", "if not stack or char in dic: stack.append(char) elif stack", "# 题意:给出一个仅包含字符'(',')','{','}','['和']',的字符串,判断给出的字符串是否是合法的括号序列。括号必须以正确的顺序关闭,\"()\"和\"()[]{}\"都是合法的括号序列,但\"(]\"和\"([)]\"不合法。 # @param s string字符串 # @return bool布尔型 #", "# @param s string字符串 # @return bool布尔型 # class Solution:", "# @return bool布尔型 # class Solution: def isValid(self , s", "): # write code here if not s: return True" ]
[ "None: info = _ConcatInfo() self._cache[path] = info with open(path, 'r')", "yaml.load(fp) info.files = config.get('files', []) info.delim = config.get('delim', \"\\n\") info.timestamp", "getDependencies(self, path): info = self._load(path) return info.files def getOutputFilenames(self, filename):", "import logging import yaml from piecrust.processing.base import Processor logger =", "and (cur_time - info.timestamp <= 1 or os.path.getmtime(path) < info.timestamp)):", "= config.get('delim', \"\\n\") info.timestamp = cur_time path_mode = config.get('path_mode', 'relative')", "= \"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME = 'concat' def __init__(self): super(ConcatProcessor,", "info = self._load(path) if not info.files: raise Exception(\"No files specified", "class ConcatProcessor(Processor): PROCESSOR_NAME = 'concat' def __init__(self): super(ConcatProcessor, self).__init__() self._cache", "self._cache = {} def matches(self, path): return path.endswith('.concat') def getDependencies(self,", "if (info is not None and (cur_time - info.timestamp <=", "info.delim = config.get('delim', \"\\n\") info.timestamp = cur_time path_mode = config.get('path_mode',", "f) for f in info.files] else: raise Exception(\"Unknown path mode:", "0 files = None delim = \"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME", "= [os.path.join(self.app.root_dir, f) for f in info.files] else: raise Exception(\"Unknown", "= cur_time path_mode = config.get('path_mode', 'relative') if path_mode == 'relative':", "__init__(self): super(ConcatProcessor, self).__init__() self._cache = {} def matches(self, path): return", "not None and (cur_time - info.timestamp <= 1 or os.path.getmtime(path)", "- info.timestamp <= 1 or os.path.getmtime(path) < info.timestamp)): return info", "[os.path.join(self.app.root_dir, f) for f in info.files] else: raise Exception(\"Unknown path", "f) for f in info.files] elif path_mode == 'absolute': info.files", "encoded_delim = info.delim.encode('utf8') with open(out_path, 'wb') as ofp: for p", "info.files] else: raise Exception(\"Unknown path mode: %s\" % path_mode) return", "path): info = self._load(path) return info.files def getOutputFilenames(self, filename): return", "1 or os.path.getmtime(path) < info.timestamp)): return info if info is", "[os.path.join(dirname, f) for f in info.files] elif path_mode == 'absolute':", "with open(path, 'r') as fp: config = yaml.load(fp) info.files =", "_ = os.path.split(path) info.files = [os.path.join(dirname, f) for f in", "info.files] elif path_mode == 'absolute': info.files = [os.path.join(self.app.root_dir, f) for", "= os.path.split(path) out_path = os.path.join(out_dir, filename[:-7]) info = self._load(path) if", "{} def matches(self, path): return path.endswith('.concat') def getDependencies(self, path): info", "path_mode == 'absolute': info.files = [os.path.join(self.app.root_dir, f) for f in", "out_path)) encoded_delim = info.delim.encode('utf8') with open(out_path, 'wb') as ofp: for", "specified in: %s\" % os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d files to:", "self._load(path) if not info.files: raise Exception(\"No files specified in: %s\"", "in: %s\" % os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d files to: %s\"", "ifp: ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim) return True def _load(self, path):", "= 'concat' def __init__(self): super(ConcatProcessor, self).__init__() self._cache = {} def", "info is None: info = _ConcatInfo() self._cache[path] = info 
with", "with open(p, 'rb') as ifp: ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim) return", "if info.delim: ofp.write(encoded_delim) return True def _load(self, path): cur_time =", "= yaml.load(fp) info.files = config.get('files', []) info.delim = config.get('delim', \"\\n\")", "out_dir): dirname, filename = os.path.split(path) out_path = os.path.join(out_dir, filename[:-7]) info", "info.delim: ofp.write(encoded_delim) return True def _load(self, path): cur_time = time.time()", "delim = \"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME = 'concat' def __init__(self):", "not info.files: raise Exception(\"No files specified in: %s\" % os.path.relpath(path,", "info.files: raise Exception(\"No files specified in: %s\" % os.path.relpath(path, self.app.root_dir))", "%s\" % os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d files to: %s\" %", "info.files def getOutputFilenames(self, filename): return [filename[:-7]] def process(self, path, out_dir):", "os.path.getmtime(path) < info.timestamp)): return info if info is None: info", "import Processor logger = logging.getLogger(__name__) class _ConcatInfo(object): timestamp = 0", "== 'relative': dirname, _ = os.path.split(path) info.files = [os.path.join(dirname, f)", "'rb') as ifp: ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim) return True def", "ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim) return True def _load(self, path): cur_time", "filename): return [filename[:-7]] def process(self, path, out_dir): dirname, filename =", "is None: info = _ConcatInfo() self._cache[path] = info with open(path,", "super(ConcatProcessor, self).__init__() self._cache = {} def matches(self, path): return path.endswith('.concat')", "return path.endswith('.concat') def getDependencies(self, path): info = self._load(path) return info.files", "self._cache.get(path) if (info is not None and (cur_time - info.timestamp", "self._load(path) return info.files def getOutputFilenames(self, filename): return [filename[:-7]] def process(self,", "= _ConcatInfo() self._cache[path] = info with open(path, 'r') as fp:", "files = None delim = \"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME =", "or os.path.getmtime(path) < info.timestamp)): return info if info is None:", "path, out_dir): dirname, filename = os.path.split(path) out_path = os.path.join(out_dir, filename[:-7])", "time.time() info = self._cache.get(path) if (info is not None and", "open(path, 'r') as fp: config = yaml.load(fp) info.files = config.get('files',", "'relative': dirname, _ = os.path.split(path) info.files = [os.path.join(dirname, f) for", "\"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME = 'concat' def __init__(self): super(ConcatProcessor, self).__init__()", "self.app.root_dir)) logger.debug(\"Concatenating %d files to: %s\" % (len(info.files), out_path)) encoded_delim", "== 'absolute': info.files = [os.path.join(self.app.root_dir, f) for f in info.files]", "f in info.files] else: raise Exception(\"Unknown path mode: %s\" %", "def getDependencies(self, path): info = self._load(path) return info.files def getOutputFilenames(self,", "info.timestamp = cur_time path_mode = config.get('path_mode', 'relative') if path_mode ==", "% os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d files to: %s\" % (len(info.files),", "(cur_time - info.timestamp <= 1 or os.path.getmtime(path) < info.timestamp)): return", "[filename[:-7]] def process(self, path, out_dir): dirname, filename = os.path.split(path) out_path", 
"cur_time = time.time() info = self._cache.get(path) if (info is not", "return info.files def getOutputFilenames(self, filename): return [filename[:-7]] def process(self, path,", "logger.debug(\"Concatenating %d files to: %s\" % (len(info.files), out_path)) encoded_delim =", "info.timestamp <= 1 or os.path.getmtime(path) < info.timestamp)): return info if", "in info.files: with open(p, 'rb') as ifp: ofp.write(ifp.read()) if info.delim:", "'wb') as ofp: for p in info.files: with open(p, 'rb')", "return True def _load(self, path): cur_time = time.time() info =", "os.path.join(out_dir, filename[:-7]) info = self._load(path) if not info.files: raise Exception(\"No", "= 0 files = None delim = \"\\n\" class ConcatProcessor(Processor):", "_ConcatInfo() self._cache[path] = info with open(path, 'r') as fp: config", "config.get('files', []) info.delim = config.get('delim', \"\\n\") info.timestamp = cur_time path_mode", "process(self, path, out_dir): dirname, filename = os.path.split(path) out_path = os.path.join(out_dir,", "dirname, _ = os.path.split(path) info.files = [os.path.join(dirname, f) for f", "% (len(info.files), out_path)) encoded_delim = info.delim.encode('utf8') with open(out_path, 'wb') as", "_load(self, path): cur_time = time.time() info = self._cache.get(path) if (info", "info.timestamp)): return info if info is None: info = _ConcatInfo()", "ConcatProcessor(Processor): PROCESSOR_NAME = 'concat' def __init__(self): super(ConcatProcessor, self).__init__() self._cache =", "elif path_mode == 'absolute': info.files = [os.path.join(self.app.root_dir, f) for f", "= info with open(path, 'r') as fp: config = yaml.load(fp)", "from piecrust.processing.base import Processor logger = logging.getLogger(__name__) class _ConcatInfo(object): timestamp", "'r') as fp: config = yaml.load(fp) info.files = config.get('files', [])", "\"\\n\") info.timestamp = cur_time path_mode = config.get('path_mode', 'relative') if path_mode", "info.files: with open(p, 'rb') as ifp: ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim)", "for f in info.files] elif path_mode == 'absolute': info.files =", "os.path.split(path) out_path = os.path.join(out_dir, filename[:-7]) info = self._load(path) if not", "if not info.files: raise Exception(\"No files specified in: %s\" %", "config.get('path_mode', 'relative') if path_mode == 'relative': dirname, _ = os.path.split(path)", "%s\" % (len(info.files), out_path)) encoded_delim = info.delim.encode('utf8') with open(out_path, 'wb')", "as ofp: for p in info.files: with open(p, 'rb') as", "timestamp = 0 files = None delim = \"\\n\" class", "in info.files] else: raise Exception(\"Unknown path mode: %s\" % path_mode)", "open(out_path, 'wb') as ofp: for p in info.files: with open(p,", "path_mode = config.get('path_mode', 'relative') if path_mode == 'relative': dirname, _", "info.files = [os.path.join(self.app.root_dir, f) for f in info.files] else: raise", "as ifp: ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim) return True def _load(self,", "raise Exception(\"No files specified in: %s\" % os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating", "= self._cache.get(path) if (info is not None and (cur_time -", "else: raise Exception(\"Unknown path mode: %s\" % path_mode) return info", "'concat' def __init__(self): super(ConcatProcessor, self).__init__() self._cache = {} def matches(self,", "= info.delim.encode('utf8') with open(out_path, 'wb') as ofp: for p in", "for p in info.files: with open(p, 'rb') as ifp: ofp.write(ifp.read())", "= 
self._load(path) return info.files def getOutputFilenames(self, filename): return [filename[:-7]] def", "import os.path import time import logging import yaml from piecrust.processing.base", "None delim = \"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME = 'concat' def", "open(p, 'rb') as ifp: ofp.write(ifp.read()) if info.delim: ofp.write(encoded_delim) return True", "info if info is None: info = _ConcatInfo() self._cache[path] =", "None and (cur_time - info.timestamp <= 1 or os.path.getmtime(path) <", "= {} def matches(self, path): return path.endswith('.concat') def getDependencies(self, path):", "return info if info is None: info = _ConcatInfo() self._cache[path]", "in info.files] elif path_mode == 'absolute': info.files = [os.path.join(self.app.root_dir, f)", "self._cache[path] = info with open(path, 'r') as fp: config =", "import yaml from piecrust.processing.base import Processor logger = logging.getLogger(__name__) class", "yaml from piecrust.processing.base import Processor logger = logging.getLogger(__name__) class _ConcatInfo(object):", "out_path = os.path.join(out_dir, filename[:-7]) info = self._load(path) if not info.files:", "info = self._load(path) return info.files def getOutputFilenames(self, filename): return [filename[:-7]]", "time import logging import yaml from piecrust.processing.base import Processor logger", "info.files = config.get('files', []) info.delim = config.get('delim', \"\\n\") info.timestamp =", "def process(self, path, out_dir): dirname, filename = os.path.split(path) out_path =", "cur_time path_mode = config.get('path_mode', 'relative') if path_mode == 'relative': dirname,", "os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d files to: %s\" % (len(info.files), out_path))", "with open(out_path, 'wb') as ofp: for p in info.files: with", "path): cur_time = time.time() info = self._cache.get(path) if (info is", "= None delim = \"\\n\" class ConcatProcessor(Processor): PROCESSOR_NAME = 'concat'", "logging import yaml from piecrust.processing.base import Processor logger = logging.getLogger(__name__)", "%d files to: %s\" % (len(info.files), out_path)) encoded_delim = info.delim.encode('utf8')", "def _load(self, path): cur_time = time.time() info = self._cache.get(path) if", "self).__init__() self._cache = {} def matches(self, path): return path.endswith('.concat') def", "def matches(self, path): return path.endswith('.concat') def getDependencies(self, path): info =", "to: %s\" % (len(info.files), out_path)) encoded_delim = info.delim.encode('utf8') with open(out_path,", "= [os.path.join(dirname, f) for f in info.files] elif path_mode ==", "as fp: config = yaml.load(fp) info.files = config.get('files', []) info.delim", "Exception(\"No files specified in: %s\" % os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d", "ofp: for p in info.files: with open(p, 'rb') as ifp:", "f in info.files] elif path_mode == 'absolute': info.files = [os.path.join(self.app.root_dir,", "= self._load(path) if not info.files: raise Exception(\"No files specified in:", "_ConcatInfo(object): timestamp = 0 files = None delim = \"\\n\"", "files to: %s\" % (len(info.files), out_path)) encoded_delim = info.delim.encode('utf8') with", "PROCESSOR_NAME = 'concat' def __init__(self): super(ConcatProcessor, self).__init__() self._cache = {}", "= os.path.join(out_dir, filename[:-7]) info = self._load(path) if not info.files: raise", "fp: config = yaml.load(fp) info.files = config.get('files', []) info.delim =", "os.path.split(path) info.files = 
[os.path.join(dirname, f) for f in info.files] elif", "if info is None: info = _ConcatInfo() self._cache[path] = info", "matches(self, path): return path.endswith('.concat') def getDependencies(self, path): info = self._load(path)", "(info is not None and (cur_time - info.timestamp <= 1", "True def _load(self, path): cur_time = time.time() info = self._cache.get(path)", "filename = os.path.split(path) out_path = os.path.join(out_dir, filename[:-7]) info = self._load(path)", "piecrust.processing.base import Processor logger = logging.getLogger(__name__) class _ConcatInfo(object): timestamp =", "info with open(path, 'r') as fp: config = yaml.load(fp) info.files", "def getOutputFilenames(self, filename): return [filename[:-7]] def process(self, path, out_dir): dirname,", "filename[:-7]) info = self._load(path) if not info.files: raise Exception(\"No files", "path): return path.endswith('.concat') def getDependencies(self, path): info = self._load(path) return", "files specified in: %s\" % os.path.relpath(path, self.app.root_dir)) logger.debug(\"Concatenating %d files", "path_mode == 'relative': dirname, _ = os.path.split(path) info.files = [os.path.join(dirname,", "class _ConcatInfo(object): timestamp = 0 files = None delim =", "= time.time() info = self._cache.get(path) if (info is not None", "'absolute': info.files = [os.path.join(self.app.root_dir, f) for f in info.files] else:", "= config.get('path_mode', 'relative') if path_mode == 'relative': dirname, _ =", "p in info.files: with open(p, 'rb') as ifp: ofp.write(ifp.read()) if", "return [filename[:-7]] def process(self, path, out_dir): dirname, filename = os.path.split(path)", "config = yaml.load(fp) info.files = config.get('files', []) info.delim = config.get('delim',", "dirname, filename = os.path.split(path) out_path = os.path.join(out_dir, filename[:-7]) info =", "os.path import time import logging import yaml from piecrust.processing.base import", "info = _ConcatInfo() self._cache[path] = info with open(path, 'r') as", "info.delim.encode('utf8') with open(out_path, 'wb') as ofp: for p in info.files:", "logging.getLogger(__name__) class _ConcatInfo(object): timestamp = 0 files = None delim", "= config.get('files', []) info.delim = config.get('delim', \"\\n\") info.timestamp = cur_time", "import time import logging import yaml from piecrust.processing.base import Processor", "getOutputFilenames(self, filename): return [filename[:-7]] def process(self, path, out_dir): dirname, filename", "(len(info.files), out_path)) encoded_delim = info.delim.encode('utf8') with open(out_path, 'wb') as ofp:", "'relative') if path_mode == 'relative': dirname, _ = os.path.split(path) info.files", "info.files = [os.path.join(dirname, f) for f in info.files] elif path_mode", "= os.path.split(path) info.files = [os.path.join(dirname, f) for f in info.files]", "for f in info.files] else: raise Exception(\"Unknown path mode: %s\"", "def __init__(self): super(ConcatProcessor, self).__init__() self._cache = {} def matches(self, path):", "path.endswith('.concat') def getDependencies(self, path): info = self._load(path) return info.files def", "[]) info.delim = config.get('delim', \"\\n\") info.timestamp = cur_time path_mode =", "< info.timestamp)): return info if info is None: info =", "if path_mode == 'relative': dirname, _ = os.path.split(path) info.files =", "logger = logging.getLogger(__name__) class _ConcatInfo(object): timestamp = 0 files =", "ofp.write(encoded_delim) return True def _load(self, path): cur_time = time.time() info", "<= 1 or 
os.path.getmtime(path) < info.timestamp)): return info if info", "= logging.getLogger(__name__) class _ConcatInfo(object): timestamp = 0 files = None", "config.get('delim', \"\\n\") info.timestamp = cur_time path_mode = config.get('path_mode', 'relative') if", "is not None and (cur_time - info.timestamp <= 1 or", "info = self._cache.get(path) if (info is not None and (cur_time", "Processor logger = logging.getLogger(__name__) class _ConcatInfo(object): timestamp = 0 files" ]
[ "CellPressed(Event): def __init__(self, position): self.position = position def get_position(self): return", "import Event class CellPressed(Event): def __init__(self, position): self.position = position", "from src.events import Event class CellPressed(Event): def __init__(self, position): self.position", "Event class CellPressed(Event): def __init__(self, position): self.position = position def", "class CellPressed(Event): def __init__(self, position): self.position = position def get_position(self):", "src.events import Event class CellPressed(Event): def __init__(self, position): self.position =", "def __init__(self, position): self.position = position def get_position(self): return self.position" ]
[ "discriminator value allowed for non-b jets minBDiscBJets = cms.double(1.0), maxBDiscLightJets", "useBTagging = cms.bool(False), ## choose algorithm for b-tagging bTagAlgorithm =", "as cms # # module to make the MaxSumPtWMass jet", "jets and ## maximum b discriminator value allowed for non-b", "b-tagging two distinguish between light and b jets useBTagging =", "lepton input leps = cms.InputTag(\"selectedPatMuons\"), ## maximum number of jets", "use b-tagging two distinguish between light and b jets useBTagging", "distinguish between light and b jets useBTagging = cms.bool(False), ##", "to make the MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\",", "# module to make the MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass", "findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input jets = cms.InputTag(\"selectedPatJets\"), ##", "cms.InputTag(\"selectedPatJets\"), ## lepton input leps = cms.InputTag(\"selectedPatMuons\"), ## maximum number", "parameter (in GeV) wMass = cms.double(80.4), ## use b-tagging two", "make the MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ##", "maxNJets = cms.int32(4), ## nominal WMass parameter (in GeV) wMass", "MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input", "## maximum b discriminator value allowed for non-b jets minBDiscBJets", "maximum number of jets to be considered maxNJets = cms.int32(4),", "= cms.bool(False), ## choose algorithm for b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"),", "GeV) wMass = cms.double(80.4), ## use b-tagging two distinguish between", "be considered maxNJets = cms.int32(4), ## nominal WMass parameter (in", "= cms.double(80.4), ## use b-tagging two distinguish between light and", "= cms.InputTag(\"selectedPatJets\"), ## lepton input leps = cms.InputTag(\"selectedPatMuons\"), ## maximum", "of jets to be considered maxNJets = cms.int32(4), ## nominal", "algorithm for b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b discriminator", "jets to be considered maxNJets = cms.int32(4), ## nominal WMass", "## lepton input leps = cms.InputTag(\"selectedPatMuons\"), ## maximum number of", "choose algorithm for b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b", "discriminator value required for b jets and ## maximum b", "FWCore.ParameterSet.Config as cms # # module to make the MaxSumPtWMass", "# findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input jets = cms.InputTag(\"selectedPatJets\"),", "b discriminator value allowed for non-b jets minBDiscBJets = cms.double(1.0),", "value required for b jets and ## maximum b discriminator", "jet input jets = cms.InputTag(\"selectedPatJets\"), ## lepton input leps =", "# # module to make the MaxSumPtWMass jet combination #", "allowed for non-b jets minBDiscBJets = cms.double(1.0), maxBDiscLightJets = cms.double(3.0)", "and ## maximum b discriminator value allowed for non-b jets", "cms.InputTag(\"selectedPatMuons\"), ## maximum number of jets to be considered maxNJets", "to be considered maxNJets = cms.int32(4), ## nominal WMass parameter", "considered maxNJets = cms.int32(4), ## nominal WMass parameter (in GeV)", "number of jets to be considered maxNJets = cms.int32(4), ##", "= 
cms.InputTag(\"selectedPatMuons\"), ## maximum number of jets to be considered", "value allowed for non-b jets minBDiscBJets = cms.double(1.0), maxBDiscLightJets =", "cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input jets = cms.InputTag(\"selectedPatJets\"), ## lepton input", "jets = cms.InputTag(\"selectedPatJets\"), ## lepton input leps = cms.InputTag(\"selectedPatMuons\"), ##", "cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b discriminator value required for b jets", "leps = cms.InputTag(\"selectedPatMuons\"), ## maximum number of jets to be", "## use b-tagging two distinguish between light and b jets", "= cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b discriminator value required for b", "two distinguish between light and b jets useBTagging = cms.bool(False),", "input jets = cms.InputTag(\"selectedPatJets\"), ## lepton input leps = cms.InputTag(\"selectedPatMuons\"),", "(in GeV) wMass = cms.double(80.4), ## use b-tagging two distinguish", "for b jets and ## maximum b discriminator value allowed", "nominal WMass parameter (in GeV) wMass = cms.double(80.4), ## use", "for non-b jets minBDiscBJets = cms.double(1.0), maxBDiscLightJets = cms.double(3.0) )", "light and b jets useBTagging = cms.bool(False), ## choose algorithm", "bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b discriminator value required for", "= cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input jets = cms.InputTag(\"selectedPatJets\"), ## lepton", "jet combination # findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input jets", "b discriminator value required for b jets and ## maximum", "b jets and ## maximum b discriminator value allowed for", "## maximum number of jets to be considered maxNJets =", "## choose algorithm for b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ## minimum", "## nominal WMass parameter (in GeV) wMass = cms.double(80.4), ##", "cms.double(80.4), ## use b-tagging two distinguish between light and b", "and b jets useBTagging = cms.bool(False), ## choose algorithm for", "b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b discriminator value required", "the MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet", "= cms.int32(4), ## nominal WMass parameter (in GeV) wMass =", "module to make the MaxSumPtWMass jet combination # findTtSemiLepJetCombMaxSumPtWMass =", "jets useBTagging = cms.bool(False), ## choose algorithm for b-tagging bTagAlgorithm", "required for b jets and ## maximum b discriminator value", "import FWCore.ParameterSet.Config as cms # # module to make the", "WMass parameter (in GeV) wMass = cms.double(80.4), ## use b-tagging", "## jet input jets = cms.InputTag(\"selectedPatJets\"), ## lepton input leps", "for b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ## minimum b discriminator value", "between light and b jets useBTagging = cms.bool(False), ## choose", "## minimum b discriminator value required for b jets and", "b jets useBTagging = cms.bool(False), ## choose algorithm for b-tagging", "cms.bool(False), ## choose algorithm for b-tagging bTagAlgorithm = cms.string(\"trackCountingHighEffBJetTags\"), ##", "cms # # module to make the MaxSumPtWMass jet combination", "wMass = cms.double(80.4), ## use b-tagging two distinguish between light", "maximum b discriminator value allowed for non-b jets minBDiscBJets =", 
"cms.int32(4), ## nominal WMass parameter (in GeV) wMass = cms.double(80.4),", "minimum b discriminator value required for b jets and ##", "combination # findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer(\"TtSemiLepJetCombMaxSumPtWMass\", ## jet input jets =", "input leps = cms.InputTag(\"selectedPatMuons\"), ## maximum number of jets to" ]
[ "#-*- coding:utf-8 -*- __all__ = [\"args\", \"colors\", \"libcolors\", \"routine\"] __version__", "-*- __all__ = [\"args\", \"colors\", \"libcolors\", \"routine\"] __version__ = \"0.96\"", "<reponame>runapp/xortool<filename>xortool/__init__.py #!/usr/bin/env python #-*- coding:utf-8 -*- __all__ = [\"args\", \"colors\",", "#!/usr/bin/env python #-*- coding:utf-8 -*- __all__ = [\"args\", \"colors\", \"libcolors\",", "coding:utf-8 -*- __all__ = [\"args\", \"colors\", \"libcolors\", \"routine\"] __version__ =", "python #-*- coding:utf-8 -*- __all__ = [\"args\", \"colors\", \"libcolors\", \"routine\"]" ]
[ "# TODO : ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def __init__(self): self.font =", "class ScenesRessourcePack(RessourcePack): def __init__(self, background_color=(170, 170, 170), ): assert is_color(background_color)", "Object from baopig.pybao.issomething import * class RessourcePack: def config(self, **kwargs):", "self._file) color = property(lambda self: self._color) height = property(lambda self:", "= property(lambda self: self._color) height = property(lambda self: self._height) class", "= height self._color = color file = property(lambda self: self._file)", "file = property(lambda self: self._file) color = property(lambda self: self._color)", "self._background_color = background_color background_color = property(lambda self: self._background_color) # TODO", "_RessourcePack: def __init__(self): self.font = FontsRessourcePack() self.scene = ScenesRessourcePack() ressources", "def __init__(self, file=None, height=15, color=(0, 0, 0), ): assert is_color(color)", "from baopig.pybao.objectutilities import Object from baopig.pybao.issomething import * class RessourcePack:", "__init__(self): self.font = FontsRessourcePack() self.scene = ScenesRessourcePack() ressources = _RessourcePack()", "= color file = property(lambda self: self._file) color = property(lambda", "in kwargs.items(): self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack): def __init__(self, file=None, height=15,", "class _RessourcePack: def __init__(self): self.font = FontsRessourcePack() self.scene = ScenesRessourcePack()", "self: self._color) height = property(lambda self: self._height) class ScenesRessourcePack(RessourcePack): def", "property(lambda self: self._height) class ScenesRessourcePack(RessourcePack): def __init__(self, background_color=(170, 170, 170),", "* class RessourcePack: def config(self, **kwargs): for name, value in", "background_color background_color = property(lambda self: self._background_color) # TODO : ButtonRessourcePack.style.create_surface(size)", "height self._color = color file = property(lambda self: self._file) color", "= property(lambda self: self._file) color = property(lambda self: self._color) height", "= background_color background_color = property(lambda self: self._background_color) # TODO :", "property(lambda self: self._file) color = property(lambda self: self._color) height =", "__init__(self, background_color=(170, 170, 170), ): assert is_color(background_color) self._background_color = background_color", "class RessourcePack: def config(self, **kwargs): for name, value in kwargs.items():", "= file self._height = height self._color = color file =", "color = property(lambda self: self._color) height = property(lambda self: self._height)", "self._background_color) # TODO : ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def __init__(self): self.font", "__init__(self, file=None, height=15, color=(0, 0, 0), ): assert is_color(color) self._file", "class FontsRessourcePack(RessourcePack): def __init__(self, file=None, height=15, color=(0, 0, 0), ):", "0, 0), ): assert is_color(color) self._file = file self._height =", "self: self._background_color) # TODO : ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def __init__(self):", "): assert is_color(color) self._file = file self._height = height self._color", "is_color(background_color) self._background_color = background_color background_color = property(lambda self: self._background_color) #", "ScenesRessourcePack(RessourcePack): def __init__(self, 
background_color=(170, 170, 170), ): assert is_color(background_color) self._background_color", "baopig.pybao.issomething import * class RessourcePack: def config(self, **kwargs): for name,", "= property(lambda self: self._background_color) # TODO : ButtonRessourcePack.style.create_surface(size) class _RessourcePack:", "self._file = file self._height = height self._color = color file", "name, value in kwargs.items(): self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack): def __init__(self,", "baopig.pybao.objectutilities import Object from baopig.pybao.issomething import * class RessourcePack: def", "170), ): assert is_color(background_color) self._background_color = background_color background_color = property(lambda", ": ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def __init__(self): self.font = FontsRessourcePack() self.scene", "is_color(color) self._file = file self._height = height self._color = color", "file self._height = height self._color = color file = property(lambda", "TODO : ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def __init__(self): self.font = FontsRessourcePack()", "170, 170), ): assert is_color(background_color) self._background_color = background_color background_color =", "for name, value in kwargs.items(): self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack): def", "): assert is_color(background_color) self._background_color = background_color background_color = property(lambda self:", "height = property(lambda self: self._height) class ScenesRessourcePack(RessourcePack): def __init__(self, background_color=(170,", "self._height = height self._color = color file = property(lambda self:", "background_color = property(lambda self: self._background_color) # TODO : ButtonRessourcePack.style.create_surface(size) class", "from baopig.pybao.issomething import * class RessourcePack: def config(self, **kwargs): for", "= property(lambda self: self._height) class ScenesRessourcePack(RessourcePack): def __init__(self, background_color=(170, 170,", "assert is_color(color) self._file = file self._height = height self._color =", "property(lambda self: self._background_color) # TODO : ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def", "**kwargs): for name, value in kwargs.items(): self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack):", "file=None, height=15, color=(0, 0, 0), ): assert is_color(color) self._file =", "color file = property(lambda self: self._file) color = property(lambda self:", "kwargs.items(): self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack): def __init__(self, file=None, height=15, color=(0,", "def __init__(self, background_color=(170, 170, 170), ): assert is_color(background_color) self._background_color =", "self: self._file) color = property(lambda self: self._color) height = property(lambda", "self._height) class ScenesRessourcePack(RessourcePack): def __init__(self, background_color=(170, 170, 170), ): assert", "background_color=(170, 170, 170), ): assert is_color(background_color) self._background_color = background_color background_color", "import Object from baopig.pybao.issomething import * class RessourcePack: def config(self,", "property(lambda self: self._color) height = property(lambda self: self._height) class ScenesRessourcePack(RessourcePack):", "self._color) height = property(lambda self: self._height) class ScenesRessourcePack(RessourcePack): def __init__(self,", "def config(self, 
**kwargs): for name, value in kwargs.items(): self.__setattr__('_'+name, value)", "ButtonRessourcePack.style.create_surface(size) class _RessourcePack: def __init__(self): self.font = FontsRessourcePack() self.scene =", "FontsRessourcePack(RessourcePack): def __init__(self, file=None, height=15, color=(0, 0, 0), ): assert", "color=(0, 0, 0), ): assert is_color(color) self._file = file self._height", "config(self, **kwargs): for name, value in kwargs.items(): self.__setattr__('_'+name, value) class", "0), ): assert is_color(color) self._file = file self._height = height", "import * class RessourcePack: def config(self, **kwargs): for name, value", "self: self._height) class ScenesRessourcePack(RessourcePack): def __init__(self, background_color=(170, 170, 170), ):", "assert is_color(background_color) self._background_color = background_color background_color = property(lambda self: self._background_color)", "value) class FontsRessourcePack(RessourcePack): def __init__(self, file=None, height=15, color=(0, 0, 0),", "height=15, color=(0, 0, 0), ): assert is_color(color) self._file = file", "self._color = color file = property(lambda self: self._file) color =", "RessourcePack: def config(self, **kwargs): for name, value in kwargs.items(): self.__setattr__('_'+name,", "def __init__(self): self.font = FontsRessourcePack() self.scene = ScenesRessourcePack() ressources =", "self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack): def __init__(self, file=None, height=15, color=(0, 0,", "value in kwargs.items(): self.__setattr__('_'+name, value) class FontsRessourcePack(RessourcePack): def __init__(self, file=None,", "<gh_stars>0 from baopig.pybao.objectutilities import Object from baopig.pybao.issomething import * class" ]
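# Usage sketch, assuming the classes above are in scope: config(**kwargs)
# stores each value under '_<name>', which is exactly what the read-only
# properties look up.
ressources.font.config(height=20, color=(255, 0, 0))
print(ressources.font.height)             # -> 20
print(ressources.scene.background_color)  # -> (170, 170, 170), the default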
[ "int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0 counter = 0 # loop all", "extent change from colormesh plt.title(title) if doText: plt.text(0.01, 0.98, \"#stations:", "year station_names = np.array([]) fixed_station = np.array([]) latitudes = np.array([])", "= \"MARS - SYNOP - {}\".format(year) else: title = \"MARS", "if listed as land and now marine, take marine fixed_station[tloc[0]]", "else: cmatch += 1 processed = True except CodesInternalError: raw_input(\"key", "[\"erai_\"] data') parser.add_argument('--year', dest='year', action='store', default = 1980, help='Which year", "elif tloc[0] != -1 and tloc[0] == nloc[0]: # if", "allow splitting of land and marine/mobile if nk == \"#1#stationNumber\":", "plt.savefig(outname) plt.close() return # scatter_map #*************************************************** def main(ms = \"era40_\",", "= 0 gc.collect() return # main #*************************************************** if __name__ ==", "== lat) if lon in longitudes: nloc, = np.where(longitudes ==", "0 start_year = 0 end_year = 0 land = 0", "\"#1#latitude\") lon = codes_get(bufr, \"#1#longitude\") sloc = tloc = nloc", "latitudes, longitudes, observations, start_year, end_year) except CodesInternalError as err: if", "if fixed_station[tloc[0]] != False: # easier to leave as mobile/marine", "False: # easier to leave as mobile/marine than to move", "+= 1 codes_release(bufr) # print \"Number of unique locations in", "and nloc[0] == -1: # if not in list, then", "np.append(observations, 1) start_year = np.append(start_year, year) end_year = np.append(end_year, year)", "marine = np.where(np.array(fixed_station) == False) bounds = np.linspace(0,max(observations),10).astype(int) cmap =", "output filename root :param array data: data to plot :param", "= [] # get BUFR key iterator iterid = codes_bufr_keys_iterator_new(bufr)", "colormesh plt.title(title) if doText: plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]), transform =", "\"erai_\" and year < 1979: return else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS,", "# ECMWF import defaults import traceback import sys from eccodes", "orientation = 'horizontal', pad = 0.05, fraction = 0.05, \\", "latitudes = np.append(latitudes, lat) longitudes = np.append(longitudes, lon) observations =", "name keyname = codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\" % keyname) these_keys", "colorbar label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize =(10,6.5)) plt.clf() ax", "open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0 counter = 0", "= \"\", doText = False): ''' Standard scatter map :param", "main(ms = args.ms, year = args.year) sys.exit() #*************************************************** # END", "scatter map :param str outname: output filename root :param array", "ticks = bounds[1:-1], label = cb_label, drawedges=True) # thicken border", "these_keys: try: name = codes_get(bufr, nk) lat = codes_get(bufr, \"#1#latitude\")", "descriptors # i.e. 
unpack the data values codes_set(bufr, 'unpack', 1)", "if name in station_names: sloc, = np.where(station_names == name) if", "plt.figure(figsize =(10,6.5)) plt.clf() ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson())", "if nk == \"#1#stationNumber\": fixed_station = np.append(fixed_station, True) else: fixed_station", "facecolor = \"0.9\", edgecolor = \"k\") ax.coastlines() ext = ax.get_extent()", "\"#stations: {}\".format(data.shape[0]), transform = ax.transAxes, fontsize = 10) plt.savefig(outname) plt.close()", "lat = codes_get(bufr, \"#1#latitude\") lon = codes_get(bufr, \"#1#longitude\") sloc =", "year: {}\".format(len(latitudes)) return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year", "#*************************************************** def main(ms = \"era40_\", year = 1980): LOCS =", "codes_get(bufr, nk) lat = codes_get(bufr, \"#1#latitude\") lon = codes_get(bufr, \"#1#longitude\")", "matches exactly, up observation counter observations[tloc[0]] += 1 end_year[tloc[0]] =", "unique station_names = np.append(station_names, name) latitudes = np.append(latitudes, lat) longitudes", "bounds, \"Number of Observations\", title) station_names = 0 fixed_station =", "observations, start_year, end_year # process_file #*************************************************** def scatter_map(outname, data, lons,", "cb_label: colorbar label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize =(10,6.5)) plt.clf()", "over the keys while codes_bufr_keys_iterator_next(iterid): # print key name keyname", "np.append(longitudes, lon) observations = np.append(observations, 1) start_year = np.append(start_year, year)", "# cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) # fix", "print \"Number of unique locations in this year: {}\".format(len(latitudes)) return", "-1 and nloc[0] == -1: # if not in list,", "= np.where(np.array(fixed_station) == False) bounds = np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r", "sloc = tloc = nloc = [-1] if name in", "= 0 counter = 0 # loop all messages (with", "and the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for b in", "latitudes, longitudes, observations, start_year, end_year # process_file #*************************************************** def scatter_map(outname,", "fig = plt.figure(figsize =(10,6.5)) plt.clf() ax = plt.axes([0.05, 0.10, 0.90,", "np.append(latitudes, lat) longitudes = np.append(longitudes, lon) observations = np.append(observations, 1)", "longitudes = np.array([]) observations = np.array([]) start_year = np.array([]) end_year", "= int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0 counter = 0 # loop", "on ERA40 [\"era40_\"] (default) or ERA-I [\"erai_\"] data') parser.add_argument('--year', dest='year',", "mpl mpl.use('Agg') import matplotlib.pyplot as plt import gc VERBOSE =", "= True except CodesInternalError: raw_input(\"key error?\") # check for new", "np.append(fixed_station, True) else: fixed_station = np.append(fixed_station, False) elif tloc[0] !=", "one element of position is unique station_names = np.append(station_names, name)", "import traceback import sys from eccodes import * # RJHD", "codes_bufr_keys_iterator_next(iterid): # print key name keyname = codes_bufr_keys_iterator_get_name(iterid) # print(\"", "Use these to select obs 
from land/marine surface name_keys =", "True) else: fixed_station = np.append(fixed_station, False) elif (tloc[0] != -1", "lon in longitudes: nloc, = np.where(longitudes == lon) if tloc[0]", "lons: longitudes :param array lats: latitudes :param obj cmap: colourmap", "zorder = 0, facecolor = \"0.9\", edgecolor = \"k\") ax.coastlines()", "import sys from eccodes import * # RJHD imports import", "from eccodes import * # RJHD imports import cartopy import", "cartopy.crs.Geodetic(), edgecolor = \"r\", linewidth = 0.1) cb=plt.colorbar(scatter, orientation =", "= np.append(fixed_station, False) elif (tloc[0] != -1 or nloc[0] !=", ":param array lons: longitudes :param array lats: latitudes :param obj", "dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for b in bounds[1:-1]]) #", "i.e. unpack the data values codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR TO", "title = \"\", figtext = \"\", doText = False): '''", "lats: latitudes :param obj cmap: colourmap to use :param array", "these_keys: new_key = False if new_key: raw_input(these_keys) # if counter", "True) else: fixed_station = np.append(fixed_station, False) elif tloc[0] != -1", "1 codes_release(bufr) # print \"Number of unique locations in this", "start_year, end_year = \\ process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations,", "1 processed = True except CodesInternalError: raw_input(\"key error?\") # check", "= np.append(latitudes, lat) longitudes = np.append(longitudes, lon) observations = np.append(observations,", "# loop over the keys while codes_bufr_keys_iterator_next(iterid): # print key", "= \"0.9\", edgecolor = \"k\") ax.coastlines() ext = ax.get_extent() #", "longitudes = np.append(longitudes, lon) observations = np.append(observations, 1) start_year =", "'reference', 'width' ] INTMDI = 2147483647 #*************************************************** def process_file(infilename, station_names,", "True for ok in other_keys: if ok in these_keys: new_key", "data to plot :param array lons: longitudes :param array lats:", "in these_keys: new_key = False if new_key: raw_input(these_keys) # if", "2017 \"\"\" # ECMWF import defaults import traceback import sys", "if nk == \"#1#stationNumber\": if fixed_station[tloc[0]] != True: # if", "if VERBOSE: traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg + '\\n') land = np.where(np.array(fixed_station)", "which give station ID information if not processed: other_keys =", "RJHD imports import cartopy import numpy as np import matplotlib", "is None: break if counter%100000 == 0: print \"message: {:d}\".format(counter)", "iterator iterid = codes_bufr_keys_iterator_new(bufr) # loop over the keys while", "# get BUFR key iterator iterid = codes_bufr_keys_iterator_new(bufr) # loop", "# add if one element of position is unique station_names", "is unique station_names = np.append(station_names, name) latitudes = np.append(latitudes, lat)", "nk == \"#1#stationNumber\": if fixed_station[tloc[0]] != True: # if listed", "== \"erai_\": title = \"MARS - SYNOP - {}\".format(year) else:", "0 observations = 0 start_year = 0 end_year = 0", "0 land = 0 marine = 0 gc.collect() return #", "in bounds[1:-1]]) # cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection)", "plt.scatter(lons, lats, c = data, cmap = cmap, norm =", "Exeter - October 2017 \"\"\" # ECMWF import defaults import", "unpack 
the data values codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR TO EXTRACT", "= nloc = [-1] if name in station_names: sloc, =", "np.where(longitudes == lon) if tloc[0] == -1 and nloc[0] ==", "take marine fixed_station[tloc[0]] = False else: if fixed_station[tloc[0]] != False:", "loop over the keys while codes_bufr_keys_iterator_next(iterid): # print key name", "codes_get(bufr, \"#1#latitude\") lon = codes_get(bufr, \"#1#longitude\") sloc = tloc =", "= year # allow splitting of land and marine/mobile if", "error?\") # check for new keys which give station ID", "cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) # fix the extent change from colormesh", "these_keys += [keyname] # delete the key iterator codes_bufr_keys_iterator_delete(iterid) #", "lat) longitudes = np.append(longitudes, lon) observations = np.append(observations, 1) start_year", "= data, cmap = cmap, norm = norm, s=10, \\", "station ID information if not processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"]", "np.array([]) latitudes = np.array([]) longitudes = np.array([]) observations = np.array([])", "== name) if lat in latitudes: tloc, = np.where(latitudes ==", "== \"#1#stationNumber\": fixed_station = np.append(fixed_station, True) else: fixed_station = np.append(fixed_station,", "(and names) along with number of obs RJHD - Exeter", "data values codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR TO EXTRACT KEYS\"\"\" these_keys", "import matplotlib.pyplot as plt import gc VERBOSE = 1 #", "nloc[0]: # if position matches exactly, up observation counter observations[tloc[0]]", "ID information if not processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key", "= 0, facecolor = \"0.9\", edgecolor = \"k\") ax.coastlines() ext", "= 0 longitudes = 0 observations = 0 start_year =", "year) # allow splitting of land and marine/mobile if nk", "if ms == \"erai_\": title = \"MARS - SYNOP -", "if doText: plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]), transform = ax.transAxes, fontsize", "lat) if lon in longitudes: nloc, = np.where(longitudes == lon)", "np.append(station_names, name) latitudes = np.append(latitudes, lat) longitudes = np.append(longitudes, lon)", "- {}\".format(year) else: title = \"MARS - ERA40 - {}\".format(year)", "or ERA-I [\"erai_\"] data') parser.add_argument('--year', dest='year', action='store', default = 1980,", "fixed_station, latitudes, longitudes, observations, start_year, end_year) except CodesInternalError as err:", "# if not in list, then add station_names = np.append(station_names,", "of station locations (and names) along with number of obs", "reporting. 
ATTRS = [ 'code', 'units', 'scale', 'reference', 'width' ]", "b in bounds[1:-1]]) # cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext,", "name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed = False for nk in", "observations, start_year, end_year): infile = open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch", "latitudes, longitudes, observations, start_year, end_year): infile = open(infilename) year =", "and year < 1979: return else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms,", "cmap, norm = norm, s=10, \\ transform = cartopy.crs.Geodetic(), edgecolor", "[\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key = True for ok in other_keys: if", "# http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for b in bounds[1:-1]]) # cb.outline.set_color('k')", "tloc[0] == nloc[0]: # if position matches exactly, up observation", "#*************************************************** def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year):", "to move # hopefully will stand out later pass else:", "== \"#1#stationNumber\": if fixed_station[tloc[0]] != True: # if listed as", "colorbar and the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for b", "# set up keyword arguments parser = argparse.ArgumentParser() parser.add_argument('--ms', dest='ms',", "counter observations[tloc[0]] += 1 end_year[tloc[0]] = year # allow splitting", "http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for b in bounds[1:-1]]) # cb.outline.set_color('k') #", "parser = argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store', default = \"era40_\", help='Run", "lon) if tloc[0] == -1 and nloc[0] == -1: #", "cb_label, drawedges=True) # thicken border of colorbar and the dividers", "ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds,", "0.05, fraction = 0.05, \\ aspect = 30, ticks =", "new_key = False if new_key: raw_input(these_keys) # if counter >", "= np.append(end_year, year) # allow splitting of land and marine/mobile", "instruct ecCodes to expand all the descriptors # i.e. 
unpack", "\"Number of Observations\", title, doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine],", "0 counter = 0 # loop all messages (with stop", "if bufr is None: break if counter%100000 == 0: print", "and tloc[0] == nloc[0]: # if position matches exactly, up", "# print \"Number of unique locations in this year: {}\".format(len(latitudes))", "INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms, year) try: station_names, fixed_station, latitudes, longitudes,", "= 0 observations = 0 start_year = 0 end_year =", "[-1] if name in station_names: sloc, = np.where(station_names == name)", "-1) and tloc[0] != nloc[0]: # add if one element", "parser.add_argument('--year', dest='year', action='store', default = 1980, help='Which year to process", "= 30, ticks = bounds[1:-1], label = cb_label, drawedges=True) #", "we need to instruct ecCodes to expand all the descriptors", "select obs from land/marine surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed", "== True) marine = np.where(np.array(fixed_station) == False) bounds = np.linspace(0,max(observations),10).astype(int)", "processed = True except CodesInternalError: raw_input(\"key error?\") # check for", "s=10, \\ transform = cartopy.crs.Geodetic(), edgecolor = \"r\", linewidth =", "set of station locations (and names) along with number of", "names) along with number of obs RJHD - Exeter -", "as plt import gc VERBOSE = 1 # verbose error", "= tloc = nloc = [-1] if name in station_names:", "= True for ok in other_keys: if ok in these_keys:", "array data: data to plot :param array lons: longitudes :param", "#!/usr/bin/python2.7 \"\"\" Extract unique set of station locations (and names)", "'\\n') land = np.where(np.array(fixed_station) == True) marine = np.where(np.array(fixed_station) ==", "lon = codes_get(bufr, \"#1#longitude\") sloc = tloc = nloc =", "title, doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap,", "1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names = np.array([]) fixed_station", "if not processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key = True", "in other_keys: if ok in these_keys: new_key = False if", "{}\".format(year) else: title = \"MARS - ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms,", "argparse # set up keyword arguments parser = argparse.ArgumentParser() parser.add_argument('--ms',", "name = codes_get(bufr, nk) lat = codes_get(bufr, \"#1#latitude\") lon =", "lats, cmap, bounds, cb_label, title = \"\", figtext = \"\",", "of colorbar and the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for", "= np.array([]) fixed_station = np.array([]) latitudes = np.array([]) longitudes =", "0.1) cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction =", "except CodesInternalError: raw_input(\"key error?\") # check for new keys which", "+= [keyname] # delete the key iterator codes_bufr_keys_iterator_delete(iterid) # Use", "= 0 fixed_station = 0 latitudes = 0 longitudes =", "= ax.transAxes, fontsize = 10) plt.savefig(outname) plt.close() return # scatter_map", "to process - default 1980') args = parser.parse_args() main(ms =", "True: # if listed as land and now marine, take", 
"cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction = 0.05,", "to leave as mobile/marine than to move # hopefully will", "figtext = \"\", doText = False): ''' Standard scatter map", "__name__ == \"__main__\": import argparse # set up keyword arguments", "main(ms = \"era40_\", year = 1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print", "scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title = \"\",", "# print(\" %s\" % keyname) these_keys += [keyname] # delete", "ERA-I [\"erai_\"] data') parser.add_argument('--year', dest='year', action='store', default = 1980, help='Which", "(default) or ERA-I [\"erai_\"] data') parser.add_argument('--year', dest='year', action='store', default =", "= True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, \"Number", "True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, \"Number of", "False): ''' Standard scatter map :param str outname: output filename", "!= -1) and tloc[0] != nloc[0]: # add if one", "expand all the descriptors # i.e. unpack the data values", "= 0 marine = 0 gc.collect() return # main #***************************************************", "try: station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \\", "colourmap to use :param array bounds: bounds for discrete colormap", "marine/mobile if nk == \"#1#stationNumber\": fixed_station = np.append(fixed_station, True) else:", "0 # loop all messages (with stop statement) while 1:", "= np.append(longitudes, lon) observations = np.append(observations, 1) start_year = np.append(start_year,", "'code', 'units', 'scale', 'reference', 'width' ] INTMDI = 2147483647 #***************************************************", "root :param array data: data to plot :param array lons:", "while 1: \"\"\"OPEN MESSAGE\"\"\" # get handle for message bufr", "infile = open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0 counter", "fixed_station, latitudes, longitudes, observations, start_year, end_year): infile = open(infilename) year", "= np.array([]) latitudes = np.array([]) longitudes = np.array([]) observations =", "= [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key = True for ok in other_keys:", "longitudes, observations, start_year, end_year): infile = open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1])", "except CodesInternalError as err: if VERBOSE: traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg +", "fix the extent change from colormesh plt.title(title) if doText: plt.text(0.01,", "\"__main__\": import argparse # set up keyword arguments parser =", "np.append(end_year, year) # allow splitting of land and marine/mobile if", "True except CodesInternalError: raw_input(\"key error?\") # check for new keys", "observations, start_year, end_year = \\ process_file(INFILE, station_names, fixed_station, latitudes, longitudes,", "nk == \"#1#stationNumber\": fixed_station = np.append(fixed_station, True) else: fixed_station =", "label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize =(10,6.5)) plt.clf() ax =", "= \"\", figtext = \"\", doText = False): ''' Standard", "1980, help='Which year to process - default 1980') args =", "% keyname) these_keys += [keyname] # delete the key iterator", 
"with number of obs RJHD - Exeter - October 2017", "+= 1 end_year[tloc[0]] = year # allow splitting of land", "RJHD - Exeter - October 2017 \"\"\" # ECMWF import", "= codes_get(bufr, \"#1#latitude\") lon = codes_get(bufr, \"#1#longitude\") sloc = tloc", "all the descriptors # i.e. unpack the data values codes_set(bufr,", "{}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, \"Number of", "if lat in latitudes: tloc, = np.where(latitudes == lat) if", "return # main #*************************************************** if __name__ == \"__main__\": import argparse", "keyname = codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\" % keyname) these_keys +=", "return # scatter_map #*************************************************** def main(ms = \"era40_\", year =", "= np.where(station_names == name) if lat in latitudes: tloc, =", "year), observations[land], longitudes[land], latitudes[land], cmap, bounds, \"Number of Observations\", title,", "0 fixed_station = 0 latitudes = 0 longitudes = 0", "lons, lats, cmap, bounds, cb_label, title = \"\", figtext =", "\"\"\"OPEN MESSAGE\"\"\" # get handle for message bufr = codes_bufr_new_from_file(infile)", "move # hopefully will stand out later pass else: cmatch", "station_names = np.array([]) fixed_station = np.array([]) latitudes = np.array([]) longitudes", ":param array data: data to plot :param array lons: longitudes", "if fixed_station[tloc[0]] != True: # if listed as land and", "return else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms, year) try: station_names, fixed_station,", "later pass else: cmatch += 1 processed = True except", "LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names = np.array([]) fixed_station =", "traceback import sys from eccodes import * # RJHD imports", "from land/marine surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed = False", "name) if lat in latitudes: tloc, = np.where(latitudes == lat)", "in longitudes: nloc, = np.where(longitudes == lon) if tloc[0] ==", "longitudes, observations, start_year, end_year) except CodesInternalError as err: if VERBOSE:", "plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]), transform = ax.transAxes, fontsize = 10)", "extent scatter = plt.scatter(lons, lats, c = data, cmap =", "''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize =(10,6.5)) plt.clf() ax = plt.axes([0.05,", "1) start_year = np.append(start_year, year) end_year = np.append(end_year, year) #", "other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key = True for ok in", "longitudes :param array lats: latitudes :param obj cmap: colourmap to", "cmatch = 0 counter = 0 # loop all messages", "= np.array([]) longitudes = np.array([]) observations = np.array([]) start_year =", "start_year, end_year) except CodesInternalError as err: if VERBOSE: traceback.print_exc(file=sys.stderr) else:", "False) elif (tloc[0] != -1 or nloc[0] != -1) and", "eccodes import * # RJHD imports import cartopy import numpy", "edgecolor = \"r\", linewidth = 0.1) cb=plt.colorbar(scatter, orientation = 'horizontal',", "> 10000: break counter += 1 codes_release(bufr) # print \"Number", "= \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names = np.array([]) fixed_station = np.array([])", "\"\"\" Extract unique set of station locations (and 
names) along", "{}\".format(data.shape[0]), transform = ax.transAxes, fontsize = 10) plt.savefig(outname) plt.close() return", "if tloc[0] == -1 and nloc[0] == -1: # if", "-1 and tloc[0] == nloc[0]: # if position matches exactly,", "tloc = nloc = [-1] if name in station_names: sloc,", "1 end_year[tloc[0]] = year # allow splitting of land and", "CodesInternalError as err: if VERBOSE: traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg + '\\n')", "bounds for discrete colormap :param str cb_label: colorbar label '''", "= 0.05, \\ aspect = 30, ticks = bounds[1:-1], label", "key name keyname = codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\" % keyname)", "\"MARS - ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land], latitudes[land],", "year # allow splitting of land and marine/mobile if nk", "fixed_station = np.append(fixed_station, False) elif tloc[0] != -1 and tloc[0]", "nloc = [-1] if name in station_names: sloc, = np.where(station_names", "= norm, s=10, \\ transform = cartopy.crs.Geodetic(), edgecolor = \"r\",", "ok in these_keys: new_key = False if new_key: raw_input(these_keys) #", "\"#1#longitude\") sloc = tloc = nloc = [-1] if name", "then add station_names = np.append(station_names, name) latitudes = np.append(latitudes, lat)", "station_names = 0 fixed_station = 0 latitudes = 0 longitudes", "these_keys = [] # get BUFR key iterator iterid =", "scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, \"Number of Observations\",", "= 0.05, fraction = 0.05, \\ aspect = 30, ticks", "= [ 'code', 'units', 'scale', 'reference', 'width' ] INTMDI =", "= False): ''' Standard scatter map :param str outname: output", "if nk in these_keys: try: name = codes_get(bufr, nk) lat", "try: name = codes_get(bufr, nk) lat = codes_get(bufr, \"#1#latitude\") lon", "= np.append(fixed_station, True) else: fixed_station = np.append(fixed_station, False) elif (tloc[0]", "end_year = 0 land = 0 marine = 0 gc.collect()", "of land and marine/mobile if nk == \"#1#stationNumber\": fixed_station =", "the extent change from colormesh plt.title(title) if doText: plt.text(0.01, 0.98,", "= 1980, help='Which year to process - default 1980') args", "handle for message bufr = codes_bufr_new_from_file(infile) if bufr is None:", "else: if fixed_station[tloc[0]] != False: # easier to leave as", "hopefully will stand out later pass else: cmatch += 1", "longitudes, observations, start_year, end_year # process_file #*************************************************** def scatter_map(outname, data,", "= np.array([]) end_year = np.array([]) if ms == \"erai_\" and", "fixed_station = np.append(fixed_station, False) elif (tloc[0] != -1 or nloc[0]", "np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r if ms == \"erai_\": title =", "= 0 latitudes = 0 longitudes = 0 observations =", "plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder =", "%s\" % keyname) these_keys += [keyname] # delete the key", "def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title =", "= np.where(longitudes == lon) if tloc[0] == -1 and nloc[0]", "\"\", doText = False): ''' Standard scatter map :param str", "[keyname] # delete the key iterator codes_bufr_keys_iterator_delete(iterid) # Use these", ":param str cb_label: colorbar label ''' 
norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize", "= args.ms, year = args.year) sys.exit() #*************************************************** # END #***************************************************", "information if not processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key =", "transform = ax.transAxes, fontsize = 10) plt.savefig(outname) plt.close() return #", "name in station_names: sloc, = np.where(station_names == name) if lat", "str cb_label: colorbar label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize =(10,6.5))", "{}\".format(len(latitudes)) return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year #", "# cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) # fix the extent", "nk in name_keys: if nk in these_keys: try: name =", "dest='year', action='store', default = 1980, help='Which year to process -", "of unique locations in this year: {}\".format(len(latitudes)) return station_names, fixed_station,", "VERBOSE: traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg + '\\n') land = np.where(np.array(fixed_station) ==", "help='Run on ERA40 [\"era40_\"] (default) or ERA-I [\"erai_\"] data') parser.add_argument('--year',", "year to process - default 1980') args = parser.parse_args() main(ms", "1: \"\"\"OPEN MESSAGE\"\"\" # get handle for message bufr =", "# we need to instruct ecCodes to expand all the", "\"MARS - SYNOP - {}\".format(year) else: title = \"MARS -", "parser.add_argument('--ms', dest='ms', action='store', default = \"era40_\", help='Run on ERA40 [\"era40_\"]", "raw_input(these_keys) # if counter > 10000: break counter += 1", "\"{}mars_{}{}.bufr\".format(LOCS, ms, year) try: station_names, fixed_station, latitudes, longitudes, observations, start_year,", "= 1 # verbose error reporting. 
ATTRS = [ 'code',", "\"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names = np.array([]) fixed_station = np.array([]) latitudes", "= \"r\", linewidth = 0.1) cb=plt.colorbar(scatter, orientation = 'horizontal', pad", "= 10) plt.savefig(outname) plt.close() return # scatter_map #*************************************************** def main(ms", "1980') args = parser.parse_args() main(ms = args.ms, year = args.year)", "year) try: station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year =", "new_key = True for ok in other_keys: if ok in", "= cartopy.crs.Geodetic(), edgecolor = \"r\", linewidth = 0.1) cb=plt.colorbar(scatter, orientation", "== \"__main__\": import argparse # set up keyword arguments parser", "latitudes :param obj cmap: colourmap to use :param array bounds:", "as err: if VERBOSE: traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg + '\\n') land", "cmap: colourmap to use :param array bounds: bounds for discrete", "if __name__ == \"__main__\": import argparse # set up keyword", "!= True: # if listed as land and now marine,", "fixed_station[tloc[0]] != False: # easier to leave as mobile/marine than", "default = 1980, help='Which year to process - default 1980')", "Observations\", title) station_names = 0 fixed_station = 0 latitudes =", "get BUFR key iterator iterid = codes_bufr_keys_iterator_new(bufr) # loop over", "element of position is unique station_names = np.append(station_names, name) latitudes", "than to move # hopefully will stand out later pass", "observations[land], longitudes[land], latitudes[land], cmap, bounds, \"Number of Observations\", title, doText", "if lon in longitudes: nloc, = np.where(longitudes == lon) if", "or nloc[0] != -1) and tloc[0] != nloc[0]: # add", "ax.coastlines() ext = ax.get_extent() # save the original extent scatter", "help='Which year to process - default 1980') args = parser.parse_args()", "exactly, up observation counter observations[tloc[0]] += 1 end_year[tloc[0]] = year", "# save the original extent scatter = plt.scatter(lons, lats, c", "cb.set_ticklabels([\"{:g}\".format(b) for b in bounds[1:-1]]) # cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k')", ":param array bounds: bounds for discrete colormap :param str cb_label:", "= cb_label, drawedges=True) # thicken border of colorbar and the", "fontsize = 10) plt.savefig(outname) plt.close() return # scatter_map #*************************************************** def", "fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file #*************************************************** def", "of position is unique station_names = np.append(station_names, name) latitudes =", "0.05, \\ aspect = 30, ticks = bounds[1:-1], label =", "np.append(fixed_station, False) elif tloc[0] != -1 and tloc[0] == nloc[0]:", "- default 1980') args = parser.parse_args() main(ms = args.ms, year", "splitting of land and marine/mobile if nk == \"#1#stationNumber\": if", "land and marine/mobile if nk == \"#1#stationNumber\": fixed_station = np.append(fixed_station,", "== \"erai_\" and year < 1979: return else: INFILE =", "counter += 1 codes_release(bufr) # print \"Number of unique locations", "= codes_bufr_new_from_file(infile) if bufr is None: break if counter%100000 ==", "#*************************************************** def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title", "pad = 0.05, fraction = 0.05, \\ aspect = 30,", "data') 
parser.add_argument('--year', dest='year', action='store', default = 1980, help='Which year to", "ERA40 [\"era40_\"] (default) or ERA-I [\"erai_\"] data') parser.add_argument('--year', dest='year', action='store',", "codes_bufr_keys_iterator_delete(iterid) # Use these to select obs from land/marine surface", "# RJHD imports import cartopy import numpy as np import", "observations = 0 start_year = 0 end_year = 0 land", "= ax.get_extent() # save the original extent scatter = plt.scatter(lons,", "of land and marine/mobile if nk == \"#1#stationNumber\": if fixed_station[tloc[0]]", "unique set of station locations (and names) along with number", "add station_names = np.append(station_names, name) latitudes = np.append(latitudes, lat) longitudes", "latitudes, longitudes, observations, start_year, end_year = \\ process_file(INFILE, station_names, fixed_station,", "arguments parser = argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store', default = \"era40_\",", "get handle for message bufr = codes_bufr_new_from_file(infile) if bufr is", "in name_keys: if nk in these_keys: try: name = codes_get(bufr,", "if ms == \"erai_\" and year < 1979: return else:", "0 gc.collect() return # main #*************************************************** if __name__ == \"__main__\":", "-1: # if not in list, then add station_names =", "keys while codes_bufr_keys_iterator_next(iterid): # print key name keyname = codes_bufr_keys_iterator_get_name(iterid)", "== -1 and nloc[0] == -1: # if not in", "marine fixed_station[tloc[0]] = False else: if fixed_station[tloc[0]] != False: #", "of Observations\", title) station_names = 0 fixed_station = 0 latitudes", "statement) while 1: \"\"\"OPEN MESSAGE\"\"\" # get handle for message", "30, ticks = bounds[1:-1], label = cb_label, drawedges=True) # thicken", "tloc, = np.where(latitudes == lat) if lon in longitudes: nloc,", "- ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap,", "verbose error reporting. 
ATTRS = [ 'code', 'units', 'scale', 'reference',", "tloc[0] != -1 and tloc[0] == nloc[0]: # if position", "scatter_map #*************************************************** def main(ms = \"era40_\", year = 1980): LOCS", "False for nk in name_keys: if nk in these_keys: try:", "c = data, cmap = cmap, norm = norm, s=10,", "as mpl mpl.use('Agg') import matplotlib.pyplot as plt import gc VERBOSE", "# get handle for message bufr = codes_bufr_new_from_file(infile) if bufr", "codes_bufr_new_from_file(infile) if bufr is None: break if counter%100000 == 0:", "# hopefully will stand out later pass else: cmatch +=", "- October 2017 \"\"\" # ECMWF import defaults import traceback", "scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, \"Number of Observations\",", "# scatter_map #*************************************************** def main(ms = \"era40_\", year = 1980):", "fixed_station = np.array([]) latitudes = np.array([]) longitudes = np.array([]) observations", "10) plt.savefig(outname) plt.close() return # scatter_map #*************************************************** def main(ms =", "bounds[1:-1]]) # cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) #", "- Exeter - October 2017 \"\"\" # ECMWF import defaults", "ax.set_extent(ext, ax.projection) # fix the extent change from colormesh plt.title(title)", "as mobile/marine than to move # hopefully will stand out", "in list, then add station_names = np.append(station_names, name) latitudes =", "for ok in other_keys: if ok in these_keys: new_key =", "station_names = np.append(station_names, name) latitudes = np.append(latitudes, lat) longitudes =", "marine = 0 gc.collect() return # main #*************************************************** if __name__", "cmap = cmap, norm = norm, s=10, \\ transform =", "# Use these to select obs from land/marine surface name_keys", "import gc VERBOSE = 1 # verbose error reporting. ATTRS", "lat in latitudes: tloc, = np.where(latitudes == lat) if lon", "station locations (and names) along with number of obs RJHD", "np.where(latitudes == lat) if lon in longitudes: nloc, = np.where(longitudes", "number of obs RJHD - Exeter - October 2017 \"\"\"", "iterator codes_bufr_keys_iterator_delete(iterid) # Use these to select obs from land/marine", "obs from land/marine surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed =", "np.append(fixed_station, False) elif (tloc[0] != -1 or nloc[0] != -1)", "1 # verbose error reporting. 
ATTRS = [ 'code', 'units',", "True) marine = np.where(np.array(fixed_station) == False) bounds = np.linspace(0,max(observations),10).astype(int) cmap", "unique locations in this year: {}\".format(len(latitudes)) return station_names, fixed_station, latitudes,", "!= nloc[0]: # add if one element of position is", "def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year): infile", "as land and now marine, take marine fixed_station[tloc[0]] = False", "+= 1 processed = True except CodesInternalError: raw_input(\"key error?\") #", "not in list, then add station_names = np.append(station_names, name) latitudes", "else: sys.stderr.write(err.msg + '\\n') land = np.where(np.array(fixed_station) == True) marine", "doText: plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]), transform = ax.transAxes, fontsize =", "plt.title(title) if doText: plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]), transform = ax.transAxes,", "10000: break counter += 1 codes_release(bufr) # print \"Number of", "land/marine surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed = False for", "= \"{}mars_{}{}.bufr\".format(LOCS, ms, year) try: station_names, fixed_station, latitudes, longitudes, observations,", "argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store', default = \"era40_\", help='Run on ERA40", ":param array lats: latitudes :param obj cmap: colourmap to use", "key iterator iterid = codes_bufr_keys_iterator_new(bufr) # loop over the keys", "leave as mobile/marine than to move # hopefully will stand", "locations in this year: {}\".format(len(latitudes)) return station_names, fixed_station, latitudes, longitudes,", "\"#1#stationNumber\": if fixed_station[tloc[0]] != True: # if listed as land", "keyname) these_keys += [keyname] # delete the key iterator codes_bufr_keys_iterator_delete(iterid)", "messages (with stop statement) while 1: \"\"\"OPEN MESSAGE\"\"\" # get", "== 0: print \"message: {:d}\".format(counter) # we need to instruct", "as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as", "= np.append(observations, 1) start_year = np.append(start_year, year) end_year = np.append(end_year,", "edgecolor = \"k\") ax.coastlines() ext = ax.get_extent() # save the", "raw_input(\"key error?\") # check for new keys which give station", "drawedges=True) # thicken border of colorbar and the dividers #", "0 latitudes = 0 longitudes = 0 observations = 0", "start_year = 0 end_year = 0 land = 0 marine", "action='store', default = 1980, help='Which year to process - default", "= codes_get(bufr, nk) lat = codes_get(bufr, \"#1#latitude\") lon = codes_get(bufr,", "codes_get(bufr, \"#1#longitude\") sloc = tloc = nloc = [-1] if", "while codes_bufr_keys_iterator_next(iterid): # print key name keyname = codes_bufr_keys_iterator_get_name(iterid) #", "easier to leave as mobile/marine than to move # hopefully", "position is unique station_names = np.append(station_names, name) latitudes = np.append(latitudes,", "(tloc[0] != -1 or nloc[0] != -1) and tloc[0] !=", "station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year) except CodesInternalError as", "= \\ process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year)", "MESSAGE\"\"\" # get handle for message bufr = codes_bufr_new_from_file(infile) if", "1) \"\"\"ITERATOR TO EXTRACT KEYS\"\"\" these_keys = [] # get", "latitudes[marine], cmap, 
bounds, \"Number of Observations\", title) station_names = 0", "== lon) if tloc[0] == -1 and nloc[0] == -1:", "in latitudes: tloc, = np.where(latitudes == lat) if lon in", "to expand all the descriptors # i.e. unpack the data", "# i.e. unpack the data values codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR", "matplotlib.pyplot as plt import gc VERBOSE = 1 # verbose", "= 2147483647 #*************************************************** def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations,", "-1 or nloc[0] != -1) and tloc[0] != nloc[0]: #", "0 marine = 0 gc.collect() return # main #*************************************************** if", "border of colorbar and the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b)", "year) end_year = np.append(end_year, year) # allow splitting of land", "end_year = np.array([]) if ms == \"erai_\" and year <", "lats, c = data, cmap = cmap, norm = norm,", "sys.stderr.write(err.msg + '\\n') land = np.where(np.array(fixed_station) == True) marine =", "] INTMDI = 2147483647 #*************************************************** def process_file(infilename, station_names, fixed_station, latitudes,", "year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0 counter = 0 #", "# main #*************************************************** if __name__ == \"__main__\": import argparse #", "SYNOP - {}\".format(year) else: title = \"MARS - ERA40 -", "sloc, = np.where(station_names == name) if lat in latitudes: tloc,", "== -1: # if not in list, then add station_names", "= 0 end_year = 0 land = 0 marine =", "cmap, bounds, \"Number of Observations\", title) station_names = 0 fixed_station", "else: fixed_station = np.append(fixed_station, False) elif tloc[0] != -1 and", "tloc[0] == -1 and nloc[0] == -1: # if not", "= 0.1) cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction", "all messages (with stop statement) while 1: \"\"\"OPEN MESSAGE\"\"\" #", "import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt import", "np.where(station_names == name) if lat in latitudes: tloc, = np.where(latitudes", "# if position matches exactly, up observation counter observations[tloc[0]] +=", "to instruct ecCodes to expand all the descriptors # i.e.", "np.append(start_year, year) end_year = np.append(end_year, year) # allow splitting of", "ok in other_keys: if ok in these_keys: new_key = False", "observations = np.append(observations, 1) start_year = np.append(start_year, year) end_year =", "obj cmap: colourmap to use :param array bounds: bounds for", "for discrete colormap :param str cb_label: colorbar label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N)", "0 longitudes = 0 observations = 0 start_year = 0", "= bounds[1:-1], label = cb_label, drawedges=True) # thicken border of", "tloc[0] != nloc[0]: # add if one element of position", "the descriptors # i.e. 
unpack the data values codes_set(bufr, 'unpack',", "linewidth = 0.1) cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05,", "ms, year) try: station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year", "norm = norm, s=10, \\ transform = cartopy.crs.Geodetic(), edgecolor =", "up observation counter observations[tloc[0]] += 1 end_year[tloc[0]] = year #", "break counter += 1 codes_release(bufr) # print \"Number of unique", "\"\", figtext = \"\", doText = False): ''' Standard scatter", "longitudes: nloc, = np.where(longitudes == lon) if tloc[0] == -1", "save the original extent scatter = plt.scatter(lons, lats, c =", "codes_bufr_keys_iterator_new(bufr) # loop over the keys while codes_bufr_keys_iterator_next(iterid): # print", "== nloc[0]: # if position matches exactly, up observation counter", "obs RJHD - Exeter - October 2017 \"\"\" # ECMWF", "0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor", "if counter > 10000: break counter += 1 codes_release(bufr) #", "of obs RJHD - Exeter - October 2017 \"\"\" #", "plt.close() return # scatter_map #*************************************************** def main(ms = \"era40_\", year", "!= -1 and tloc[0] == nloc[0]: # if position matches", "fraction = 0.05, \\ aspect = 30, ticks = bounds[1:-1],", "title = \"MARS - SYNOP - {}\".format(year) else: title =", "(with stop statement) while 1: \"\"\"OPEN MESSAGE\"\"\" # get handle", "= argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store', default = \"era40_\", help='Run on", "ax.get_extent() # save the original extent scatter = plt.scatter(lons, lats,", "from colormesh plt.title(title) if doText: plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]), transform", "'horizontal', pad = 0.05, fraction = 0.05, \\ aspect =", "= plt.cm.YlOrRd_r if ms == \"erai_\": title = \"MARS -", "mobile/marine than to move # hopefully will stand out later", "import * # RJHD imports import cartopy import numpy as", "will stand out later pass else: cmatch += 1 processed", "= False if new_key: raw_input(these_keys) # if counter > 10000:", "= codes_get(bufr, \"#1#longitude\") sloc = tloc = nloc = [-1]", "outname: output filename root :param array data: data to plot", "= np.append(fixed_station, True) else: fixed_station = np.append(fixed_station, False) elif tloc[0]", "np.array([]) observations = np.array([]) start_year = np.array([]) end_year = np.array([])", "= \"k\") ax.coastlines() ext = ax.get_extent() # save the original", "1979: return else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms, year) try: station_names,", "observations = np.array([]) start_year = np.array([]) end_year = np.array([]) if", "title) station_names = 0 fixed_station = 0 latitudes = 0", "# delete the key iterator codes_bufr_keys_iterator_delete(iterid) # Use these to", "the data values codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR TO EXTRACT KEYS\"\"\"", "doText = False): ''' Standard scatter map :param str outname:", "'scale', 'reference', 'width' ] INTMDI = 2147483647 #*************************************************** def process_file(infilename,", "this year: {}\".format(len(latitudes)) return station_names, fixed_station, latitudes, longitudes, observations, start_year,", "process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year): infile =", "= 0 # loop all messages (with stop statement) while", "= plt.scatter(lons, lats, c = data, cmap = 
cmap, norm", "latitudes = 0 longitudes = 0 observations = 0 start_year", "keys which give station ID information if not processed: other_keys", "nk in these_keys: try: name = codes_get(bufr, nk) lat =", "= np.array([]) if ms == \"erai_\" and year < 1979:", "\"Number of Observations\", title) station_names = 0 fixed_station = 0", "# loop all messages (with stop statement) while 1: \"\"\"OPEN", "== False) bounds = np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r if ms", "np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt", "counter = 0 # loop all messages (with stop statement)", "bounds = np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r if ms == \"erai_\":", "0: print \"message: {:d}\".format(counter) # we need to instruct ecCodes", "0.98, \"#stations: {}\".format(data.shape[0]), transform = ax.transAxes, fontsize = 10) plt.savefig(outname)", "parser.parse_args() main(ms = args.ms, year = args.year) sys.exit() #*************************************************** #", "marine/mobile if nk == \"#1#stationNumber\": if fixed_station[tloc[0]] != True: #", "# if counter > 10000: break counter += 1 codes_release(bufr)", "= codes_bufr_keys_iterator_new(bufr) # loop over the keys while codes_bufr_keys_iterator_next(iterid): #", "ms == \"erai_\" and year < 1979: return else: INFILE", "main #*************************************************** if __name__ == \"__main__\": import argparse # set", "#*************************************************** if __name__ == \"__main__\": import argparse # set up", "keyword arguments parser = argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store', default =", "numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot", "and tloc[0] != nloc[0]: # add if one element of", "= parser.parse_args() main(ms = args.ms, year = args.year) sys.exit() #***************************************************", "Extract unique set of station locations (and names) along with", "data: data to plot :param array lons: longitudes :param array", "= \"era40_\", help='Run on ERA40 [\"era40_\"] (default) or ERA-I [\"erai_\"]", "= 0 start_year = 0 end_year = 0 land =", "label = cb_label, drawedges=True) # thicken border of colorbar and", "now marine, take marine fixed_station[tloc[0]] = False else: if fixed_station[tloc[0]]", "False if new_key: raw_input(these_keys) # if counter > 10000: break", "if one element of position is unique station_names = np.append(station_names,", "longitudes, observations, start_year, end_year = \\ process_file(INFILE, station_names, fixed_station, latitudes,", "in these_keys: try: name = codes_get(bufr, nk) lat = codes_get(bufr,", "fixed_station = np.append(fixed_station, True) else: fixed_station = np.append(fixed_station, False) elif", "# allow splitting of land and marine/mobile if nk ==", "to use :param array bounds: bounds for discrete colormap :param", "matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt import gc", "and now marine, take marine fixed_station[tloc[0]] = False else: if", "print(\" %s\" % keyname) these_keys += [keyname] # delete the", "data, lons, lats, cmap, bounds, cb_label, title = \"\", figtext", "\"\"\"ITERATOR TO EXTRACT KEYS\"\"\" these_keys = [] # get BUFR", "bounds, cb_label, title = \"\", figtext = \"\", doText =", "# print key name keyname = codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\"", "end_year) except CodesInternalError as err: if VERBOSE: traceback.print_exc(file=sys.stderr) else: 
sys.stderr.write(err.msg", "stop statement) while 1: \"\"\"OPEN MESSAGE\"\"\" # get handle for", "\"#1#stationNumber\"] processed = False for nk in name_keys: if nk", "np.array([]) end_year = np.array([]) if ms == \"erai_\" and year", "set up keyword arguments parser = argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store',", "to plot :param array lons: longitudes :param array lats: latitudes", "October 2017 \"\"\" # ECMWF import defaults import traceback import", "\"0.9\", edgecolor = \"k\") ax.coastlines() ext = ax.get_extent() # save", "bounds: bounds for discrete colormap :param str cb_label: colorbar label", "# thicken border of colorbar and the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib", "loop all messages (with stop statement) while 1: \"\"\"OPEN MESSAGE\"\"\"", "0 end_year = 0 land = 0 marine = 0", "args = parser.parse_args() main(ms = args.ms, year = args.year) sys.exit()", "= codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\" % keyname) these_keys += [keyname]", "start_year = np.append(start_year, year) end_year = np.append(end_year, year) # allow", "import defaults import traceback import sys from eccodes import *", "cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) # fix the extent change", "= plt.figure(figsize =(10,6.5)) plt.clf() ax = plt.axes([0.05, 0.10, 0.90, 0.90],", "observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, \"Number of Observations\", title) station_names", "if position matches exactly, up observation counter observations[tloc[0]] += 1", "use :param array bounds: bounds for discrete colormap :param str", "cartopy import numpy as np import matplotlib as mpl mpl.use('Agg')", "= \"MARS - ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land],", "latitudes[land], cmap, bounds, \"Number of Observations\", title, doText = True)", "list, then add station_names = np.append(station_names, name) latitudes = np.append(latitudes,", "if new_key: raw_input(these_keys) # if counter > 10000: break counter", "these to select obs from land/marine surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\",", "map :param str outname: output filename root :param array data:", "name) latitudes = np.append(latitudes, lat) longitudes = np.append(longitudes, lon) observations", "= np.append(station_names, name) latitudes = np.append(latitudes, lat) longitudes = np.append(longitudes,", ":param obj cmap: colourmap to use :param array bounds: bounds", "out later pass else: cmatch += 1 processed = True", "scatter = plt.scatter(lons, lats, c = data, cmap = cmap,", "counter%100000 == 0: print \"message: {:d}\".format(counter) # we need to", "print key name keyname = codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\" %", "* # RJHD imports import cartopy import numpy as np", "nloc[0]: # add if one element of position is unique", "colormap :param str cb_label: colorbar label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig =", "{:d}\".format(counter) # we need to instruct ecCodes to expand all", "def main(ms = \"era40_\", year = 1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\"", "ms == \"erai_\": title = \"MARS - SYNOP - {}\".format(year)", "np.array([]) if ms == \"erai_\" and year < 1979: return", "\\ process_file(INFILE, station_names, fixed_station, latitudes, longitudes, 
observations, start_year, end_year) except", "Observations\", title, doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine], latitudes[marine],", "None: break if counter%100000 == 0: print \"message: {:d}\".format(counter) #", "print \"message: {:d}\".format(counter) # we need to instruct ecCodes to", "CodesInternalError: raw_input(\"key error?\") # check for new keys which give", "cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) # fix the", "# fix the extent change from colormesh plt.title(title) if doText:", "\"message: {:d}\".format(counter) # we need to instruct ecCodes to expand", "= np.append(fixed_station, False) elif tloc[0] != -1 and tloc[0] ==", "along with number of obs RJHD - Exeter - October", "cb.dividers.set_color('k') cb.dividers.set_linewidth(2) ax.set_extent(ext, ax.projection) # fix the extent change from", "land = np.where(np.array(fixed_station) == True) marine = np.where(np.array(fixed_station) == False)", "pass else: cmatch += 1 processed = True except CodesInternalError:", ":param str outname: output filename root :param array data: data", "\"\"\" # ECMWF import defaults import traceback import sys from", "bufr is None: break if counter%100000 == 0: print \"message:", "TO EXTRACT KEYS\"\"\" these_keys = [] # get BUFR key", "\"#1#stationNumber\": fixed_station = np.append(fixed_station, True) else: fixed_station = np.append(fixed_station, False)", "'unpack', 1) \"\"\"ITERATOR TO EXTRACT KEYS\"\"\" these_keys = [] #", "norm, s=10, \\ transform = cartopy.crs.Geodetic(), edgecolor = \"r\", linewidth", "norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig = plt.figure(figsize =(10,6.5)) plt.clf() ax = plt.axes([0.05, 0.10,", "in station_names: sloc, = np.where(station_names == name) if lat in", "for nk in name_keys: if nk in these_keys: try: name", "\"#1#aircraftFlightNumber\"] new_key = True for ok in other_keys: if ok", "import numpy as np import matplotlib as mpl mpl.use('Agg') import", "original extent scatter = plt.scatter(lons, lats, c = data, cmap", "discrete colormap :param str cb_label: colorbar label ''' norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N) fig", "observations[tloc[0]] += 1 end_year[tloc[0]] = year # allow splitting of", "else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms, year) try: station_names, fixed_station, latitudes,", "= cmap, norm = norm, s=10, \\ transform = cartopy.crs.Geodetic(),", "fixed_station[tloc[0]] = False else: if fixed_station[tloc[0]] != False: # easier", "new keys which give station ID information if not processed:", "ATTRS = [ 'code', 'units', 'scale', 'reference', 'width' ] INTMDI", "= np.where(latitudes == lat) if lon in longitudes: nloc, =", "start_year, end_year # process_file #*************************************************** def scatter_map(outname, data, lons, lats,", "VERBOSE = 1 # verbose error reporting. ATTRS = [", "gc VERBOSE = 1 # verbose error reporting. 
ATTRS =", "need to instruct ecCodes to expand all the descriptors #", "= np.where(np.array(fixed_station) == True) marine = np.where(np.array(fixed_station) == False) bounds", "- {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, \"Number", "fixed_station[tloc[0]] != True: # if listed as land and now", "= 1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names = np.array([])", "plt import gc VERBOSE = 1 # verbose error reporting.", "marine, take marine fixed_station[tloc[0]] = False else: if fixed_station[tloc[0]] !=", "[ 'code', 'units', 'scale', 'reference', 'width' ] INTMDI = 2147483647", "processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key = True for ok", "the original extent scatter = plt.scatter(lons, lats, c = data,", "= plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder", "array lons: longitudes :param array lats: latitudes :param obj cmap:", "defaults import traceback import sys from eccodes import * #", "= open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0 counter =", "key iterator codes_bufr_keys_iterator_delete(iterid) # Use these to select obs from", "plot :param array lons: longitudes :param array lats: latitudes :param", "2147483647 #*************************************************** def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year,", "plt.cm.YlOrRd_r if ms == \"erai_\": title = \"MARS - SYNOP", "codes_release(bufr) # print \"Number of unique locations in this year:", "if ok in these_keys: new_key = False if new_key: raw_input(these_keys)", "#draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = \"0.9\", edgecolor =", "= np.array([]) start_year = np.array([]) end_year = np.array([]) if ms", "ecCodes to expand all the descriptors # i.e. 
unpack the", "end_year = \\ process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year,", "check for new keys which give station ID information if", "np.where(np.array(fixed_station) == False) bounds = np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r if", "\\ transform = cartopy.crs.Geodetic(), edgecolor = \"r\", linewidth = 0.1)", "''' Standard scatter map :param str outname: output filename root", "codes_bufr_keys_iterator_get_name(iterid) # print(\" %s\" % keyname) these_keys += [keyname] #", "import argparse # set up keyword arguments parser = argparse.ArgumentParser()", "not processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\", \"#1#aircraftFlightNumber\"] new_key = True for", "splitting of land and marine/mobile if nk == \"#1#stationNumber\": fixed_station", "fixed_station, latitudes, longitudes, observations, start_year, end_year = \\ process_file(INFILE, station_names,", "data, cmap = cmap, norm = norm, s=10, \\ transform", "=(10,6.5)) plt.clf() ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines()", "cmap = plt.cm.YlOrRd_r if ms == \"erai_\": title = \"MARS", "INTMDI = 2147483647 #*************************************************** def process_file(infilename, station_names, fixed_station, latitudes, longitudes,", "= np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r if ms == \"erai_\": title", "surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed = False for nk", "year < 1979: return else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms, year)", "the keys while codes_bufr_keys_iterator_next(iterid): # print key name keyname =", "= False else: if fixed_station[tloc[0]] != False: # easier to", "aspect = 30, ticks = bounds[1:-1], label = cb_label, drawedges=True)", "[] # get BUFR key iterator iterid = codes_bufr_keys_iterator_new(bufr) #", "= np.append(start_year, year) end_year = np.append(end_year, year) # allow splitting", "< 1979: return else: INFILE = \"{}mars_{}{}.bufr\".format(LOCS, ms, year) try:", "- SYNOP - {}\".format(year) else: title = \"MARS - ERA40", "start_year, end_year): infile = open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch =", "end_year[tloc[0]] = year # allow splitting of land and marine/mobile", "for message bufr = codes_bufr_new_from_file(infile) if bufr is None: break", "land = 0 marine = 0 gc.collect() return # main", "EXTRACT KEYS\"\"\" these_keys = [] # get BUFR key iterator", "\"era40_\", help='Run on ERA40 [\"era40_\"] (default) or ERA-I [\"erai_\"] data')", "default = \"era40_\", help='Run on ERA40 [\"era40_\"] (default) or ERA-I", "of Observations\", title, doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine],", "the key iterator codes_bufr_keys_iterator_delete(iterid) # Use these to select obs", "latitudes = np.array([]) longitudes = np.array([]) observations = np.array([]) start_year", "transform = cartopy.crs.Geodetic(), edgecolor = \"r\", linewidth = 0.1) cb=plt.colorbar(scatter,", "bounds[1:-1], label = cb_label, drawedges=True) # thicken border of colorbar", "action='store', default = \"era40_\", help='Run on ERA40 [\"era40_\"] (default) or", "0.10, 0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder = 0,", "\\ aspect = 30, ticks = bounds[1:-1], label = cb_label,", "position matches exactly, up 
observation counter observations[tloc[0]] += 1 end_year[tloc[0]]", "locations (and names) along with number of obs RJHD -", "= [-1] if name in station_names: sloc, = np.where(station_names ==", "0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor =", "the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib # cb.set_ticklabels([\"{:g}\".format(b) for b in bounds[1:-1]])", "elif (tloc[0] != -1 or nloc[0] != -1) and tloc[0]", "nloc[0] == -1: # if not in list, then add", "start_year = np.array([]) end_year = np.array([]) if ms == \"erai_\"", "name_keys: if nk in these_keys: try: name = codes_get(bufr, nk)", "ext = ax.get_extent() # save the original extent scatter =", "bounds, \"Number of Observations\", title, doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year),", "end_year # process_file #*************************************************** def scatter_map(outname, data, lons, lats, cmap,", "process_file #*************************************************** def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label,", "np.array([]) start_year = np.array([]) end_year = np.array([]) if ms ==", "import cartopy import numpy as np import matplotlib as mpl", "ECMWF import defaults import traceback import sys from eccodes import", "ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = \"0.9\", edgecolor = \"k\")", "'width' ] INTMDI = 2147483647 #*************************************************** def process_file(infilename, station_names, fixed_station,", "longitudes[land], latitudes[land], cmap, bounds, \"Number of Observations\", title, doText =", "projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = \"0.9\",", "False else: if fixed_station[tloc[0]] != False: # easier to leave", "year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, \"Number of Observations\", title)", "\"Number of unique locations in this year: {}\".format(len(latitudes)) return station_names,", "delete the key iterator codes_bufr_keys_iterator_delete(iterid) # Use these to select", "np.append(fixed_station, True) else: fixed_station = np.append(fixed_station, False) elif (tloc[0] !=", "add if one element of position is unique station_names =", "str outname: output filename root :param array data: data to", "ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = \"0.9\", edgecolor", "year = 1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names =", "if not in list, then add station_names = np.append(station_names, name)", "dest='ms', action='store', default = \"era40_\", help='Run on ERA40 [\"era40_\"] (default)", "sys from eccodes import * # RJHD imports import cartopy", "# verbose error reporting. 
ATTRS = [ 'code', 'units', 'scale',", "gc.collect() return # main #*************************************************** if __name__ == \"__main__\": import", "fixed_station = 0 latitudes = 0 longitudes = 0 observations", "False) elif tloc[0] != -1 and tloc[0] == nloc[0]: #", "!= -1 or nloc[0] != -1) and tloc[0] != nloc[0]:", "return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file", "mpl.use('Agg') import matplotlib.pyplot as plt import gc VERBOSE = 1", "KEYS\"\"\" these_keys = [] # get BUFR key iterator iterid", "station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file #***************************************************", "filename root :param array data: data to plot :param array", "# cb.set_ticklabels([\"{:g}\".format(b) for b in bounds[1:-1]]) # cb.outline.set_color('k') # cb.outline.set_linewidth(2)", "station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year): infile = open(infilename)", "nloc, = np.where(longitudes == lon) if tloc[0] == -1 and", "cb_label, title = \"\", figtext = \"\", doText = False):", "plt.clf() ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True)", "default 1980') args = parser.parse_args() main(ms = args.ms, year =", "else: title = \"MARS - ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year),", "Standard scatter map :param str outname: output filename root :param", "array lats: latitudes :param obj cmap: colourmap to use :param", "if counter%100000 == 0: print \"message: {:d}\".format(counter) # we need", "ax.projection) # fix the extent change from colormesh plt.title(title) if", "ax.transAxes, fontsize = 10) plt.savefig(outname) plt.close() return # scatter_map #***************************************************", "0, facecolor = \"0.9\", edgecolor = \"k\") ax.coastlines() ext =", "= 'horizontal', pad = 0.05, fraction = 0.05, \\ aspect", "latitudes: tloc, = np.where(latitudes == lat) if lon in longitudes:", "other_keys: if ok in these_keys: new_key = False if new_key:", "else: fixed_station = np.append(fixed_station, False) elif (tloc[0] != -1 or", "in this year: {}\".format(len(latitudes)) return station_names, fixed_station, latitudes, longitudes, observations,", "# if listed as land and now marine, take marine", "bufr = codes_bufr_new_from_file(infile) if bufr is None: break if counter%100000", "for new keys which give station ID information if not", "np.array([]) fixed_station = np.array([]) latitudes = np.array([]) longitudes = np.array([])", "# process_file #*************************************************** def scatter_map(outname, data, lons, lats, cmap, bounds,", "cmap, bounds, cb_label, title = \"\", figtext = \"\", doText", "ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson()) ax.gridlines() #draw_labels=True) ax.add_feature(cartopy.feature.LAND,", "iterid = codes_bufr_keys_iterator_new(bufr) # loop over the keys while codes_bufr_keys_iterator_next(iterid):", "nk) lat = codes_get(bufr, \"#1#latitude\") lon = codes_get(bufr, \"#1#longitude\") sloc", "listed as land and now marine, take marine fixed_station[tloc[0]] =", "[\"era40_\"] (default) or ERA-I [\"erai_\"] data') parser.add_argument('--year', dest='year', action='store', default", "codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR TO EXTRACT KEYS\"\"\" these_keys = []", "[\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed = False 
for nk in name_keys: if", "= False for nk in name_keys: if nk in these_keys:", "!= False: # easier to leave as mobile/marine than to", "break if counter%100000 == 0: print \"message: {:d}\".format(counter) # we", "BUFR key iterator iterid = codes_bufr_keys_iterator_new(bufr) # loop over the", "= [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"] processed = False for nk in name_keys:", "doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds,", "# check for new keys which give station ID information", "= 0 land = 0 marine = 0 gc.collect() return", "print year station_names = np.array([]) fixed_station = np.array([]) latitudes =", "= np.array([]) observations = np.array([]) start_year = np.array([]) end_year =", "counter > 10000: break counter += 1 codes_release(bufr) # print", "message bufr = codes_bufr_new_from_file(infile) if bufr is None: break if", "station_names: sloc, = np.where(station_names == name) if lat in latitudes:", "observation counter observations[tloc[0]] += 1 end_year[tloc[0]] = year # allow", "change from colormesh plt.title(title) if doText: plt.text(0.01, 0.98, \"#stations: {}\".format(data.shape[0]),", "and marine/mobile if nk == \"#1#stationNumber\": fixed_station = np.append(fixed_station, True)", "station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \\ process_file(INFILE,", "process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year) except CodesInternalError", "to select obs from land/marine surface name_keys = [\"#1#shipOrMobileLandStationIdentifier\", \"#1#stationNumber\"]", "np.where(np.array(fixed_station) == True) marine = np.where(np.array(fixed_station) == False) bounds =", "title = \"MARS - ERA40 - {}\".format(year) scatter_map(\"mars_{}{}_land_observations.png\".format(ms, year), observations[land],", "'units', 'scale', 'reference', 'width' ] INTMDI = 2147483647 #*************************************************** def", "\"era40_\", year = 1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year station_names", "process - default 1980') args = parser.parse_args() main(ms = args.ms,", "new_key: raw_input(these_keys) # if counter > 10000: break counter +=", "imports import cartopy import numpy as np import matplotlib as", "processed = False for nk in name_keys: if nk in", "end_year): infile = open(infilename) year = int(infilename.split(\".\")[0].split(\"_\")[-1]) cmatch = 0", "end_year = np.append(end_year, year) # allow splitting of land and", "longitudes[marine], latitudes[marine], cmap, bounds, \"Number of Observations\", title) station_names =", "\"erai_\": title = \"MARS - SYNOP - {}\".format(year) else: title", "error reporting. 
ATTRS = [ 'code', 'units', 'scale', 'reference', 'width'", "\"k\") ax.coastlines() ext = ax.get_extent() # save the original extent", "np.array([]) longitudes = np.array([]) observations = np.array([]) start_year = np.array([])", "longitudes = 0 observations = 0 start_year = 0 end_year", "land and marine/mobile if nk == \"#1#stationNumber\": if fixed_station[tloc[0]] !=", "array bounds: bounds for discrete colormap :param str cb_label: colorbar", "# easier to leave as mobile/marine than to move #", "lon) observations = np.append(observations, 1) start_year = np.append(start_year, year) end_year", "stand out later pass else: cmatch += 1 processed =", "observations, start_year, end_year) except CodesInternalError as err: if VERBOSE: traceback.print_exc(file=sys.stderr)", "traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg + '\\n') land = np.where(np.array(fixed_station) == True)", "= \"era40_\", year = 1980): LOCS = \"/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/\" print year", "values codes_set(bufr, 'unpack', 1) \"\"\"ITERATOR TO EXTRACT KEYS\"\"\" these_keys =", "False) bounds = np.linspace(0,max(observations),10).astype(int) cmap = plt.cm.YlOrRd_r if ms ==", "give station ID information if not processed: other_keys = [\"#1#carrierBalloonOrAircraftIdentifier\",", "\"r\", linewidth = 0.1) cb=plt.colorbar(scatter, orientation = 'horizontal', pad =", "thicken border of colorbar and the dividers # http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib #", "nloc[0] != -1) and tloc[0] != nloc[0]: # add if", "and marine/mobile if nk == \"#1#stationNumber\": if fixed_station[tloc[0]] != True:", "cmatch += 1 processed = True except CodesInternalError: raw_input(\"key error?\")", "err: if VERBOSE: traceback.print_exc(file=sys.stderr) else: sys.stderr.write(err.msg + '\\n') land =", "up keyword arguments parser = argparse.ArgumentParser() parser.add_argument('--ms', dest='ms', action='store', default", "land and now marine, take marine fixed_station[tloc[0]] = False else:", "+ '\\n') land = np.where(np.array(fixed_station) == True) marine = np.where(np.array(fixed_station)", "cmap, bounds, \"Number of Observations\", title, doText = True) scatter_map(\"mars_{}{}_marine_observations.png\".format(ms,", "for b in bounds[1:-1]]) # cb.outline.set_color('k') # cb.outline.set_linewidth(2) cb.dividers.set_color('k') cb.dividers.set_linewidth(2)" ]
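#***************************************************
# Usage sketch (not part of the original script). The script's own file
# name is not given above, so "plot_mars_locations.py" is only an assumed
# placeholder. Given the LOCS path and the INFILE pattern in main(),
#
#     python2 plot_mars_locations.py --ms era40_ --year 1985
#
# would read {LOCS}mars_era40_1985.bufr and write two maps to the working
# directory: mars_era40_1985_land_observations.png and
# mars_era40_1985_marine_observations.png.
#***************************************************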
[ "Authorized Connect Apps belonging to an account. :var Page: The", "used to avoid listing duplicated resources if new ones are", "get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None): \"\"\" Fetch the Applications belonging", "= http.Request('GET', self.get_url(), params) return request, parsers.parse_json def update(self, *args,", "PageSize=None, AfterSid=None): \"\"\" Fetch the Authorized Connect Apps belonging to", "= 'Applications' class Application(ApplicationsBase): def create(self, *args, **kwargs): raise base.MethodNotSupported()", ":vartype AfterSid: str \"\"\" params = resource.get_params(None, locals()) request =", "= resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request,", "in the previous page, used to avoid listing duplicated resources", "base.MethodNotSupported() class ConnectAppsBase(resource.TwilioResource): path = 'ConnectApps' def create(self, *args, **kwargs):", "base.MethodNotSupported() class AuthorizedConnectAppsBase(resource.TwilioResource): path = 'AuthorizedConnectApps' def create(self, *args, **kwargs):", "from libsaas import http, parsers from libsaas.services import base from", "resource.get_params(None, locals()) request = http.Request('GET', self.get_url(), params) return request, parsers.parse_json", "class AuthorizedConnectApp(AuthorizedConnectAppsBase): pass class AuthorizedConnectApps(AuthorizedConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None,", "path = 'Applications' class Application(ApplicationsBase): def create(self, *args, **kwargs): raise", "The current page number. Zero-indexed, so the first page is", "return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() class", "*args, **kwargs): raise base.MethodNotSupported() def update(self, *args, **kwargs): raise base.MethodNotSupported()", "page, used to avoid listing duplicated resources if new ones", "'ConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported() def delete(self, *args,", "Page: int :var PageSize: How many resources to return in", "def delete(self, *args, **kwargs): raise base.MethodNotSupported() class ConnectAppsBase(resource.TwilioResource): path =", "AuthorizedConnectAppsBase(resource.TwilioResource): path = 'AuthorizedConnectApps' def create(self, *args, **kwargs): raise base.MethodNotSupported()", "def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None): \"\"\" Fetch the Applications", "first page is 0. :vartype Page: int :var PageSize: How", "returned in the previous page, used to avoid listing duplicated", "int :var AfterSid: The last Sid returned in the previous", "is 50, and the maximum is 1000. :vartype PageSize: int", "class AuthorizedConnectApps(AuthorizedConnectAppsBase): @base.apimethod def get(self, Page=None, PageSize=None, AfterSid=None): \"\"\" Fetch", "to return in each list page. The default is 50,", "return request, parsers.parse_json def update(self, *args, **kwargs): raise base.MethodNotSupported() def", "is 0. :vartype Page: int :var PageSize: How many resources", "raise base.MethodNotSupported() class ConnectApp(ConnectAppsBase): pass class ConnectApps(ConnectAppsBase): @base.apimethod def get(self,", "**kwargs): raise base.MethodNotSupported() class AuthorizedConnectApp(AuthorizedConnectAppsBase): pass class AuthorizedConnectApps(AuthorizedConnectAppsBase): @base.apimethod def", "return the Account resources with friendly names that exactly match", "Zero-indexed, so the first page is 0. 
<gh_stars>100-1000
from libsaas import http, parsers
from libsaas.services import base
from libsaas.services.twilio import resource


class ApplicationsBase(resource.TwilioResource):

    path = 'Applications'


class Application(ApplicationsBase):

    def create(self, *args, **kwargs):
        raise base.MethodNotSupported()


class Applications(ApplicationsBase):

    @base.apimethod
    def get(self, FriendlyName=None, Page=None, PageSize=None, AfterSid=None):
        """
        Fetch the Applications belonging to an account.

        :var FriendlyName: Only return the Account resources with friendly
            names that exactly match this name.
        :vartype FriendlyName: str

        :var Page: The current page number. Zero-indexed, so the first page
            is 0.
        :vartype Page: int

        :var PageSize: How many resources to return in each list page.
            The default is 50, and the maximum is 1000.
        :vartype PageSize: int

        :var AfterSid: The last Sid returned in the previous page, used to
            avoid listing duplicated resources if new ones are created
            while paging.
        :vartype AfterSid: str
        """
        params = resource.get_params(None, locals())
        request = http.Request('GET', self.get_url(), params)

        return request, parsers.parse_json

    def update(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def delete(self, *args, **kwargs):
        raise base.MethodNotSupported()


class ConnectAppsBase(resource.TwilioResource):

    path = 'ConnectApps'

    def create(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def delete(self, *args, **kwargs):
        raise base.MethodNotSupported()


class ConnectApp(ConnectAppsBase):
    pass


class ConnectApps(ConnectAppsBase):

    @base.apimethod
    def get(self, Page=None, PageSize=None, AfterSid=None):
        """
        Fetch the Connect Apps belonging to an account.

        :var Page: The current page number. Zero-indexed, so the first page
            is 0.
        :vartype Page: int

        :var PageSize: How many resources to return in each list page.
            The default is 50, and the maximum is 1000.
        :vartype PageSize: int

        :var AfterSid: The last Sid returned in the previous page, used to
            avoid listing duplicated resources if new ones are created
            while paging.
        :vartype AfterSid: str
        """
        params = resource.get_params(None, locals())
        request = http.Request('GET', self.get_url(), params)

        return request, parsers.parse_json

    def update(self, *args, **kwargs):
        raise base.MethodNotSupported()


class AuthorizedConnectAppsBase(resource.TwilioResource):

    path = 'AuthorizedConnectApps'

    def create(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def update(self, *args, **kwargs):
        raise base.MethodNotSupported()

    def delete(self, *args, **kwargs):
        raise base.MethodNotSupported()


class AuthorizedConnectApp(AuthorizedConnectAppsBase):
    pass


class AuthorizedConnectApps(AuthorizedConnectAppsBase):

    @base.apimethod
    def get(self, Page=None, PageSize=None, AfterSid=None):
        """
        Fetch the Authorized Connect Apps belonging to an account.

        :var Page: The current page number. Zero-indexed, so the first page
            is 0.
        :vartype Page: int

        :var PageSize: How many resources to return in each list page.
            The default is 50, and the maximum is 1000.
        :vartype PageSize: int

        :var AfterSid: The last Sid returned in the previous page, used to
            avoid listing duplicated resources if new ones are created
            while paging.
        :vartype AfterSid: str
        """
        params = resource.get_params(None, locals())
        request = http.Request('GET', self.get_url(), params)

        return request, parsers.parse_json
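# Usage sketch (added for illustration; not part of the module above): how
# these resource classes are typically reached through the libsaas service
# entry point. The `twilio.Twilio(...)` constructor and the `applications()`
# accessor are assumptions based on common libsaas conventions, not confirmed
# by this file.
from libsaas.services import twilio

service = twilio.Twilio('<account_sid>', '<auth_token>')  # placeholder credentials
# Issues a GET on the Applications resource with the paging parameters
# documented in Applications.get above.
apps = service.applications().get(FriendlyName='my-app', Page=0, PageSize=50)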
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse

import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint


def softmax(x):
    t_max = np.max(x, axis=1, keepdims=True)  # max of each row, dims kept
    e_x = np.exp(x - t_max)  # subtract each row's max before exponentiating
    t_sum = np.sum(e_x, axis=1, keepdims=True)  # sum of each row, dims kept
    f_x = e_x / t_sum
    return f_x


def score_model(preds, test_pos, test_neg, weight, bias):
    """
    Score the model on the test set edges.

    Args:
        preds (ndarray): Node embeddings produced by the trained model.
        test_pos (list): Positive test edges.
        test_neg (list): Negative test edges.
        weight (ndarray): Regression weights taken from the checkpoint.
        bias (ndarray): Regression bias taken from the checkpoint.

    Returns:
        auc(Float32): AUC result.
        f1(Float32): F1-Score result.
    """
    score_positive_edges = np.array(test_pos, dtype=np.int32).T
    score_negative_edges = np.array(test_neg, dtype=np.int32).T
    test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
                                      preds[score_positive_edges[1, :], :]), axis=1)
    test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
                                      preds[score_negative_edges[1, :], :]), axis=1)
    # edge features of shape (num_test_edges, 128) times the (128, 3) regression weights
    scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
    probability_scores = np.exp(softmax(scores))
    predictions = probability_scores[:, 0] / probability_scores[:, 0:2].sum(1)
    targets = [0] * len(test_pos) + [1] * len(test_neg)
    auc, f1 = calculate_auc(targets, predictions)
    return auc, f1


def get_acc():
    """get infer Accuracy."""
    parser = argparse.ArgumentParser(description='postprocess')
    parser.add_argument('--dataset_name', type=str, default='bitcoin-otc',
                        choices=['bitcoin-otc', 'bitcoin-alpha'], help='dataset name')
    parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
    parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
    parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
    parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
    parser.add_argument("--edge_path", nargs="?", default="./input/bitcoin_alpha.csv", help="Edge list csv.")
    parser.add_argument("--features-path", nargs="?", default="./input/bitcoin_alpha.csv", help="Edge list csv.")
    parser.add_argument("--test-size", type=float, default=0.2, help="Test dataset size. Default is 0.2.")
    parser.add_argument("--seed", type=int, default=42, help="Random seed for sklearn pre-training. Default is 42.")
    parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
    parser.add_argument("--reduction-iterations", type=int, default=30, help="Number of SVD iterations. Default is 30.")
    parser.add_argument("--reduction-dimensions", type=int, default=64,
                        help="Number of SVD feature extraction dimensions. Default is 64.")
    args_opt = parser.parse_args()

    # Runtime
    context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
    # Load the positive and negative test edges
    test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
    test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
    # Load parameters from the checkpoint
    param_dict = load_checkpoint(args_opt.checkpoint_file)
    # Reshape the raw inference output into per-node embeddings
    pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
    if args_opt.dataset_name == 'bitcoin-otc':
        pred = pred.reshape(5881, 64)
    else:
        pred = pred.reshape(3783, 64)
    auc, f1 = score_model(pred, test_pos, test_neg,
                          param_dict['regression_weights'].asnumpy(),
                          param_dict['regression_bias'].asnumpy())
    print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))


if __name__ == '__main__':
    get_acc()
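# Illustration (added; not part of the script above): why softmax() subtracts
# the per-row max before exponentiating. softmax(x) == softmax(x - c) for any
# per-row constant c, so the shift changes nothing mathematically, but it
# keeps np.exp from overflowing on large logits.
import numpy as np

logits = np.array([[1000.0, 1001.0, 1002.0]])
unstable = np.exp(logits)                                     # overflows to inf
shifted = np.exp(logits - logits.max(axis=1, keepdims=True))  # finite: exp([-2, -1, 0])
stable = shifted / shifted.sum(axis=1, keepdims=True)
print(stable)  # [[0.09003057 0.24472847 0.66524096]]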
import re
import numpy as np
from collections import OrderedDict

import pykeops
import pykeops.config


############################################################
# define backend
############################################################

class SetBackend():
    """
    This class is used to centralize the options used in PyKeOps.
    """
    dev = OrderedDict([('CPU', 0), ('GPU', 1)])
    grid = OrderedDict([('1D', 0), ('2D', 1)])
    memtype = OrderedDict([('host', 0), ('device', 1)])

    possible_options_list = ['auto',
                             'CPU',
                             'GPU',
                             'GPU_1D', 'GPU_1D_device', 'GPU_1D_host',
                             'GPU_2D', 'GPU_2D_device', 'GPU_2D_host']

    def define_tag_backend(self, backend, variables):
        """
        Try to make a good guess for the backend. Available methods are
        (host means Cpu, device means Gpu):

            CPU : computations performed with the host from host arrays
            GPU_1D_device : computations performed on the device from device arrays, using the 1D scheme
            GPU_2D_device : computations performed on the device from device arrays, using the 2D scheme
            GPU_1D_host : computations performed on the device from host arrays, using the 1D scheme
            GPU_2D_host : computations performed on the device from host data, using the 2D scheme

        :param backend (str), variables (tuple)
        :return (tagCPUGPU, tag1D2D, tagHostDevice)
        """
        # check that the option is valid
        if backend not in self.possible_options_list:
            raise ValueError('Invalid backend. Should be one of ', self.possible_options_list)

        # auto : infer everything
        if backend == 'auto':
            return int(pykeops.config.gpu_available), self._find_grid(), self._find_mem(variables)

        split_backend = re.split('_', backend)
        if len(split_backend) == 1:    # CPU or GPU
            return self.dev[split_backend[0]], self._find_grid(), self._find_mem(variables)
        elif len(split_backend) == 2:  # GPU_1D or GPU_2D
            return self.dev[split_backend[0]], self.grid[split_backend[1]], self._find_mem(variables)
        elif len(split_backend) == 3:  # the option is known
            return self.dev[split_backend[0]], self.grid[split_backend[1]], self.memtype[split_backend[2]]

    def define_backend(self, backend, variables):
        tagCPUGPU, tag1D2D, tagHostDevice = self.define_tag_backend(backend, variables)
        # map the integer tags back to their string labels ('CPU'/'GPU', ...)
        return (list(self.dev)[tagCPUGPU],
                list(self.grid)[tag1D2D],
                list(self.memtype)[tagHostDevice])

    @staticmethod
    def _find_dev():
        return int(pykeops.config.gpu_available)

    @staticmethod
    def _find_mem(variables):
        # Infer if we're working with numpy arrays or torch tensors:
        if all([type(var) is np.ndarray for var in variables]):
            MemType = 0
        elif pykeops.config.torch_found:
            import torch
            if all([type(var) in [torch.Tensor, torch.nn.parameter.Parameter] for var in variables]):
                from pykeops.torch.utils import is_on_device
                VarsAreOnGpu = tuple(map(is_on_device, tuple(variables)))

                if all(VarsAreOnGpu):
                    MemType = 1
                elif not any(VarsAreOnGpu):
                    MemType = 0
                else:
                    raise ValueError('At least two input variables have different memory locations (Cpu/Gpu).')
            else:
                raise TypeError('All variables should either be numpy arrays or torch tensors.')
        else:
            raise TypeError('All variables should either be numpy arrays or torch tensors.')

        return MemType

    @staticmethod
    def _find_grid():
        return 0


def get_tag_backend(backend, variables, str=False):
    """
    Entry point to get the correct backend.
    """
    res = SetBackend()
    if not str:
        return res.define_tag_backend(backend, variables)
    else:
        return res.define_backend(backend, variables)
"""Prepare acoustic features for one-to-one voice conversion.

usage: prepare_features_vc.py [options] <DATA_ROOT> <source_speaker> <target_speaker>

options:
    --max_files=<N>      Max num files to be collected. [default: 100]
    --dst_dir=<d>        Destination directory [default: data/cmu_arctic_vc].
    --overwrite          Overwrite files.
    -h, --help           show this help message and exit
"""
from __future__ import division, print_function, absolute_import

from docopt import docopt
import numpy as np

from nnmnkwii.datasets import FileSourceDataset
from nnmnkwii import preprocessing as P
from nnmnkwii.preprocessing.alignment import DTWAligner
from nnmnkwii.datasets import cmu_arctic, voice_statistics, vcc2016

import pysptk
import pyworld
from scipy.io import wavfile
from tqdm import tqdm
from os.path import basename, splitext, exists, expanduser, join, dirname
import os
import sys

from hparams import vc as hp
from hparams import hparams_debug_string


# vcc2016.WavFileDataSource and voice_statistics.WavFileDataSource can be
# drop-in replacements. See below for details:
# https://r9y9.github.io/nnmnkwii/latest/references/datasets.html#builtin-data-sources
class MGCSource(cmu_arctic.WavFileDataSource):
    def __init__(self, data_root, speakers, max_files=None):
        super(MGCSource, self).__init__(data_root, speakers, max_files=max_files)
        self.alpha = None

    def collect_features(self, wav_path):
        fs, x = wavfile.read(wav_path)
        x = x.astype(np.float64)
        f0, timeaxis = pyworld.dio(x, fs, frame_period=hp.frame_period)
        f0 = pyworld.stonemask(x, f0, timeaxis, fs)
        spectrogram = pyworld.cheaptrick(x, f0, timeaxis, fs)
        spectrogram = P.trim_zeros_frames(spectrogram)
        if self.alpha is None:
            self.alpha = pysptk.util.mcepalpha(fs)
        mgc = pysptk.sp2mc(spectrogram, order=hp.order, alpha=self.alpha)
        # Drop 0-th coefficient
        mgc = mgc[:, 1:]
        # 50Hz cut-off MS smoothing
        hop_length = int(fs * (hp.frame_period * 0.001))
        modfs = fs / hop_length
        mgc = P.modspec_smoothing(mgc, modfs, cutoff=50)
        # Add delta
        mgc = P.delta_features(mgc, hp.windows)
        return mgc.astype(np.float32)


if __name__ == "__main__":
    args = docopt(__doc__)
    print("Command line args:\n", args)
    DATA_ROOT = args["<DATA_ROOT>"]
    source_speaker = args["<source_speaker>"]
    target_speaker = args["<target_speaker>"]
    max_files = int(args["--max_files"])
    dst_dir = args["--dst_dir"]
    overwrite = args["--overwrite"]

    print(hparams_debug_string(hp))

    X_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [source_speaker], max_files=max_files))
    Y_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [target_speaker], max_files=max_files))

    skip_feature_extraction = exists(join(dst_dir, "X")) \
        and exists(join(dst_dir, "Y"))
    if overwrite:
        skip_feature_extraction = False
    if skip_feature_extraction:
        print("Features seem to be prepared, skipping feature extraction.")
        sys.exit(0)

    # Create dirs
    for speaker, name in [(source_speaker, "X"), (target_speaker, "Y")]:
        d = join(dst_dir, name)
        print("Destination dir for {}: {}".format(speaker, d))
        if not exists(d):
            os.makedirs(d)

    # Convert to arrays
    print("Convert datasets to arrays")
    X, Y = X_dataset.asarray(verbose=1), Y_dataset.asarray(verbose=1)

    # Alignment
    print("Perform alignment")
    X, Y = DTWAligner().transform((X, Y))

    print("Save features to disk")
    for idx, (x, y) in tqdm(enumerate(zip(X, Y))):
        # paths
        src_name = splitext(basename(X_dataset.collected_files[idx][0]))[0]
        tgt_name = splitext(basename(Y_dataset.collected_files[idx][0]))[0]
        src_path = join(dst_dir, "X", src_name)
        tgt_path = join(dst_dir, "Y", tgt_name)

        # Trim and adjust frames
        x = P.trim_zeros_frames(x)
        y = P.trim_zeros_frames(y)
        x, y = P.adjust_frame_lengths(x, y, pad=True, divisible_by=2)

        # Save
        np.save(src_path, x)
        np.save(tgt_path, y)
--overwrite Overwrite files. -h, --help show", "exists(d): os.makedirs(d) # Convert to arrays print(\"Convert datasets to arrays\")", "[target_speaker], max_files=max_files)) skip_feature_extraction = exists(join(dst_dir, \"X\")) \\ and exists(join(dst_dir, \"Y\"))", "voice_statistics.WavFileDataSource can be # drop-in replacement. See below for details:", "print_function, absolute_import from docopt import docopt import numpy as np", "join(dst_dir, \"X\", src_name) tgt_path = join(dst_dir, \"Y\", tgt_name) # Trim", "# https://r9y9.github.io/nnmnkwii/latest/references/datasets.html#builtin-data-sources class MGCSource(cmu_arctic.WavFileDataSource): def __init__(self, data_root, speakers, max_files=None): super(MGCSource,", "print(hparams_debug_string(hp)) X_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [source_speaker], max_files=max_files)) Y_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [target_speaker],", "= P.adjust_frame_lengths(x, y, pad=True, divisible_by=2) # Save np.save(src_path, x) np.save(tgt_path,", "= join(dst_dir, \"Y\", tgt_name) # Trim and ajast frames x", "# 50Hz cut-off MS smoothing hop_length = int(fs * (hp.frame_period", "(x, y) in tqdm(enumerate(zip(X, Y))): # paths src_name = splitext(basename(X_dataset.collected_files[idx][0]))[0]", "nnmnkwii.datasets import FileSourceDataset from nnmnkwii import preprocessing as P from", "args[\"--dst_dir\"] overwrite = args[\"--overwrite\"] print(hparams_debug_string(hp)) X_dataset = FileSourceDataset(MGCSource(DATA_ROOT, [source_speaker], max_files=max_files))", "def __init__(self, data_root, speakers, max_files=None): super(MGCSource, self).__init__(data_root, speakers, max_files=max_files) self.alpha", "max_files=max_files)) skip_feature_extraction = exists(join(dst_dir, \"X\")) \\ and exists(join(dst_dir, \"Y\")) if", "in [(source_speaker, \"X\"), (target_speaker, \"Y\")]: d = join(dst_dir, name) print(\"Destination", "= X_dataset.asarray(verbose=1), Y_dataset.asarray(verbose=1) # Alignment print(\"Perform alignment\") X, Y =", "[options] <DATA_ROOT> <source_speaker> <target_speaker> options: --max_files=<N> Max num files to", "self.alpha = pysptk.util.mcepalpha(fs) mgc = pysptk.sp2mc(spectrogram, order=hp.order, alpha=self.alpha) # Drop", "and ajast frames x = P.trim_zeros_frames(x) y = P.trim_zeros_frames(y) x,", "Max num files to be collected. [default: 100] --dst_dir=<d> Destination", "<filename>prepare_features_vc.py \"\"\"Prepare acoustic features for one-to-one voice conversion. usage: prepare_features_vc.py", "<DATA_ROOT> <source_speaker> <target_speaker> options: --max_files=<N> Max num files to be", "# vcc2016.WavFileDataSource and voice_statistics.WavFileDataSource can be # drop-in replacement. See", "idx, (x, y) in tqdm(enumerate(zip(X, Y))): # paths src_name =", "= P.trim_zeros_frames(x) y = P.trim_zeros_frames(y) x, y = P.adjust_frame_lengths(x, y,", "\"\"\"Prepare acoustic features for one-to-one voice conversion. usage: prepare_features_vc.py [options]", "DTWAligner from nnmnkwii.datasets import cmu_arctic, voice_statistics, vcc2016 import pysptk import", "args:\\n\", args) DATA_ROOT = args[\"<DATA_ROOT>\"] source_speaker = args[\"<source_speaker>\"] target_speaker =", "join, dirname import os import sys from hparams import vc", "DTWAligner().transform((X, Y)) print(\"Save features to disk\") for idx, (x, y)", "options: --max_files=<N> Max num files to be collected. 
[default: 100]", "* (hp.frame_period * 0.001)) modfs = fs / hop_length mgc", "1:] # 50Hz cut-off MS smoothing hop_length = int(fs *", "absolute_import from docopt import docopt import numpy as np from", "preprocessing as P from nnmnkwii.preprocessing.alignment import DTWAligner from nnmnkwii.datasets import" ]
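# --- Illustrative sketch (added, not part of the original script) ----------
# A minimal, self-contained version of the pipeline above for a single
# utterance pair: WORLD analysis -> mel-cepstrum -> DTW alignment. The wav
# paths, frame period, and mcep order below are hypothetical placeholders;
# the original script reads them from hparams.
import numpy as np
import pysptk
import pyworld
from scipy.io import wavfile
from nnmnkwii import preprocessing as P
from nnmnkwii.preprocessing.alignment import DTWAligner


def extract_mgc(wav_path, frame_period=5, order=24):
    # WORLD analysis: coarse F0 (dio), F0 refinement (stonemask), envelope.
    fs, x = wavfile.read(wav_path)
    x = x.astype(np.float64)
    f0, timeaxis = pyworld.dio(x, fs, frame_period=frame_period)
    f0 = pyworld.stonemask(x, f0, timeaxis, fs)
    sp = pyworld.cheaptrick(x, f0, timeaxis, fs)
    sp = P.trim_zeros_frames(sp)
    # Mel-generalized cepstrum; drop the 0-th (energy) coefficient.
    mgc = pysptk.sp2mc(sp, order=order, alpha=pysptk.util.mcepalpha(fs))
    return mgc[:, 1:].astype(np.float32)


if __name__ == "__main__":
    # "source.wav" / "target.wav" are placeholder paths.
    x, y = extract_mgc("source.wav"), extract_mgc("target.wav")
    # DTWAligner operates on zero-padded 3D arrays: (num_utterances, T, D).
    T = max(len(x), len(y))
    X = np.zeros((1, T, x.shape[-1]), dtype=np.float32)
    Y = np.zeros_like(X)
    X[0, :len(x)], Y[0, :len(y)] = x, y
    X_aligned, Y_aligned = DTWAligner().transform((X, Y))
    print(X_aligned.shape, Y_aligned.shape)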
<filename>lib/tests/streamlit/pydeck_test.py
# Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json

import pandas as pd
import pydeck as pdk

from tests import testutil
import streamlit as st
import streamlit.elements.deck_gl_json_chart as deck_gl_json_chart

df1 = pd.DataFrame({"lat": [1, 2, 3, 4], "lon": [10, 20, 30, 40]})


class PyDeckTest(testutil.DeltaGeneratorTestCase):
    def test_basic(self):
        """Test that a pydeck object works."""
        st.pydeck_chart(
            pdk.Deck(
                layers=[
                    pdk.Layer("ScatterplotLayer", data=df1),
                ]
            )
        )

        el = self.get_delta_from_queue().new_element
        actual = json.loads(el.deck_gl_json_chart.json)

        self.assertEqual(actual["layers"][0]["@@type"], "ScatterplotLayer")
        self.assertEqual(
            actual["layers"][0]["data"],
            [
                {"lat": 1, "lon": 10},
                {"lat": 2, "lon": 20},
                {"lat": 3, "lon": 30},
                {"lat": 4, "lon": 40},
            ],
        )

    def test_no_args(self):
        """Test that it can be called with no args."""
        st.pydeck_chart()

        el = self.get_delta_from_queue().new_element
        actual = json.loads(el.deck_gl_json_chart.json)

        self.assertEqual(actual, deck_gl_json_chart.EMPTY_MAP)
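# --- Illustrative sketch (added, not part of the original test) ------------
# What the assertions above are checking: pydeck serializes a Deck to the
# deck.gl JSON spec, and st.pydeck_chart forwards that JSON to the frontend.
# This snippet only assumes pandas and pydeck are installed.
import json

import pandas as pd
import pydeck as pdk

df = pd.DataFrame({"lat": [1, 2], "lon": [10, 20]})
deck = pdk.Deck(layers=[pdk.Layer("ScatterplotLayer", data=df)])
spec = json.loads(deck.to_json())
# Each layer records its type under the "@@type" key, as asserted above.
assert spec["layers"][0]["@@type"] == "ScatterplotLayer"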
[ "'pipeline_options'): self.project_id = ( sink.pipeline_options.view_as(GoogleCloudOptions).project) assert self.project_id is not None", "self.project_id is not None self.dataset_id = self.sink.table_reference.datasetId self.table_id = self.sink.table_reference.tableId", "values are not JSON compliant # This code will catch", "string does not match the expected format. \"\"\" if isinstance(table,", "row): self.rows_buffer.append(row) if len(self.rows_buffer) > self.rows_buffer_flush_threshold: self._flush_rows_buffer() class RowAsDictJsonCoder(coders.Coder): \"\"\"A", "RowAsDictJsonCoder) # Schema for the rows being read by the", "# If this exception has been raised, the BigQuerySource \"modes\"", "contain a full table reference instead of just a #", "client wrapper with utilities for querying. The wrapper is used", "runner. Returns: a unique row ID string \"\"\" self._unique_row_id +=", "a large number of rows. logging.info('Waiting on response from query:", "insertion. Of special note is the row ID that we", "@staticmethod def _get_table_fn(destination): if callable(destination): return destination else: return lambda", "table elif isinstance(table, value_provider.ValueProvider): return table table_reference = bigquery.TableReference() #", "job_id, page_token) if not response.jobComplete: # The jobComplete field can", "job=bigquery.Job( configuration=bigquery.JobConfiguration( dryRun=True, query=bigquery.JobConfigurationQuery( query=query, useLegacySql=use_legacy_sql, )), jobReference=reference)) response =", "logging.warning( 'Dataset %s:%s does not exist so we will create", "field.type == 'RECORD': # Note that a schema field object", "Output: \"XYZ\" return value elif field.type == 'BOOLEAN': # Input:", "default=default_encoder).encode('utf-8') except ValueError as e: raise ValueError('%s. %s' % (e,", "2.0 # (the \"License\"); you may not use this file", "table_id: The table id. schema: A bigquery.TableSchema instance or None.", "== 'INTEGER': # Input: \"123\" --> Output: 123 return int(value)", "be inserted successfully. Returns: A tuple (bool, errors). If first", "reference into a (project, dataset, table) tuple. Args: destination: Either", "( project_id, self.source.table_reference.datasetId, self.source.table_reference.tableId) elif self.source.query is not None: self.query", "table_reference = bigquery.TableReference() # If dataset argument is not specified,", "%s\", query) return None referenced_tables = response.statistics.query.referencedTables if referenced_tables: #", "if field.mode == 'REPEATED': if value is None: # Ideally", "NULLABLE.' 
% field.name) result[field.name] = None else: result[field.name] = self._convert_cell_value_to_dict(value,", "/rest/v2/tabledata/insertAll.\"\"\" # The rows argument is a list of #", "'notImplemented'} @staticmethod def should_retry(strategy, error_message): if strategy == RetryStrategy.RETRY_ALWAYS: return", "BigQueryDisposition.WRITE_TRUNCATE: # BigQuery can route data to the old table", "schema = bigquery.TableFieldSchema() schema.name = field['name'] schema.type = field['type'] if", "from apache_beam.internal.gcp.json_value import to_json_value from apache_beam.internal.http_client import get_new_http from apache_beam.io.gcp.internal.clients", "dataset=dataset) response = self.client.datasets.Insert(request) # The response is a bigquery.Dataset", "self.rows_buffer = [] if not passed: raise RuntimeError('Could not successfully", "# pylint: enable=wrong-import-order, wrong-import-position MAX_RETRIES = 3 JSON_COMPLIANCE_ERROR = 'NAN,", "None: # Unittests don't pass projectIds so they can be", "Args: project_id: The project id owning the table. dataset_id: The", "destination def parse_table_schema_from_json(schema_string): \"\"\"Parse the Table Schema provided as string.", "not define a project we default to executing project. if", "page_token) if not response.jobComplete: # The jobComplete field can be", "a string does not match the expected format. \"\"\" if", "num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table_location(self, project_id, dataset_id, table_id): table = self.get_table(project_id,", "representing the destination containing 'PROJECT:DATASET.TABLE'. Returns: A string representing the", "as well. raise ValueError(\"BigQuerySource must have either a table or", "inferred because the ' 'table does not exist.' % (project_id,", "error_message): if strategy == RetryStrategy.RETRY_ALWAYS: return True elif strategy ==", "here means the # query has no errors. The start_query_job", "json import logging import re import sys import time import", "create it as temporary ' 'with location=%s', project_id, dataset_id, location)", "\"\"\" TEMP_TABLE = 'temp_table_' TEMP_DATASET = 'temp_dataset_' def __init__(self, client=None):", "import time import uuid from builtins import object from future.utils", "%s does not reference any tables.\", query) return None @retry.with_exponential_backoff(", "Should have an option for ignoreUnknownValues? rows=rows)) response = self.client.tabledata.InsertAll(request)", "create tables, query a table, etc.). \"\"\" TEMP_TABLE = 'temp_table_'", "string \"\"\" self._unique_row_id += 1 return '%s_%d' % (self._row_id_prefix, self._unique_row_id)", "> self.rows_buffer_flush_threshold: self._flush_rows_buffer() class RowAsDictJsonCoder(coders.Coder): \"\"\"A coder for a table", "string. This is the default coder for sources and sinks", "to not insert the same row multiple times for fail", "out # (default is 10 seconds). 
Note that this is", "sources and sinks (e.g., find and create tables, query a", "to the source return self.convert_row_to_dict(value, field) elif field.type == 'NUMERIC':", "project_id, dataset_id, table_id): table = self.get_table(project_id, dataset_id, table_id) return table.location", "field.type) def convert_row_to_dict(self, row, schema): \"\"\"Converts a TableRow instance using", "from the timezone library but this is a known #", "# This code will catch this error to emit an", "buffer_size or 1000 # Figure out the project, dataset, and", "in field: schema.fields = [_parse_schema_field(x) for x in field['fields']] return", "argument will contain a full table reference instead of just", "except HttpError as exn: if exn.status_code == 404: if create_disposition", "object has the following attributes: projectId, datasetId, and tableId. Raises:", "'temp_table_' TEMP_DATASET = 'temp_dataset_' def __init__(self, client=None): self.client = client", "= bigquery.BigqueryTabledataListRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, maxResults=1) response = self.client.tabledata.List(request) #", "wait # that much time before creating the table and", "+ 'for 2 mins after the delete and create.') #", "plain Python dictionaries. Each dictionary is a row and each", "Note that a schema field object supports also a RECORD", "errors = self._insert_all_rows( project_id, dataset_id, table_id, final_rows, skip_invalid_rows) return result,", "APIs. NOTHING IN THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES. \"\"\"", "table reference: 'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a", "'Missing executing project information. Please use the --project ' 'command", "source.pipeline_options.view_as(GoogleCloudOptions).project) else: self.executing_project = None # TODO(silviuc): Try to automatically", "= self.client.jobs.Insert(request) if response.statistics is None: # This behavior is", "License for the specific language governing permissions and # limitations", "def _value_provider_or_static_val(elm): if isinstance(elm, value_provider.ValueProvider): return elm else: # The", "pass projectIds so they can be run without error raise", "== BigQueryDisposition.WRITE_TRUNCATE: self._delete_table(project_id, dataset_id, table_id) # Create a new table", "self.source.query else: # Enforce the \"modes\" enforced by BigQuerySource.__init__. #", "= None try: found_table = self.get_table(project_id, dataset_id, table_id) except HttpError", "= table return table_reference # ----------------------------------------------------------------------------- # BigQueryWrapper. class BigQueryWrapper(object):", "does not # support the precision that decimal supports. BQ", "must contain the entire table reference: 'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_job(self, project, job_id, location=None): request =", "raised since the table was expected to be empty. 
\"\"\"", "field: schema.fields = [_parse_schema_field(x) for x in field['fields']] return schema", "% (project_id, dataset_id)) except HttpError as exn: if exn.status_code ==", "of the project containing this table or null if the", "lookup parameters Returns: bigquery.Table instance Raises: HttpError if lookup failed.", "\"modes\" have # changed and this method will need to", "referenced table in :data:`source.query` See Also: - :meth:`BigQueryWrapper.get_query_location` - :meth:`BigQueryWrapper.get_table_location`", "self.table_id = self.sink.table_reference.tableId def _flush_rows_buffer(self): if self.rows_buffer: logging.info('Writing %d rows", "(str) used to avoid multiple insertions. If the row ID", "non-empty and non-None table = referenced_tables[0] location = self.get_table_location( table.projectId,", "in sources and sinks (e.g., find and create tables, query", "class RowAsDictJsonCoder(coders.Coder): \"\"\"A coder for a table row (represented as", "must have either a table or query\") def _get_source_location(self): \"\"\"", "!= BigQueryDisposition.WRITE_TRUNCATE: return found_table else: created_table = self._create_table(project_id=project_id, dataset_id=dataset_id, table_id=table_id,", "is not None: # Unittests don't pass projectIds so they", "source self.test_bigquery_client = test_bigquery_client if auth.is_running_in_gce: self.executing_project = auth.executing_project elif", "def __iter__(self): for rows, schema in self.client.run_query( project_id=self.executing_project, query=self.query, use_legacy_sql=self.use_legacy_sql,", "= '' if client else uuid.uuid4() self._temporary_table_suffix = uuid.uuid4().hex @property", "dicts. final_rows = [] for row in rows: json_object =", "the schema to a Python dict.\"\"\" result = {} for", "match: raise ValueError( 'Expected a table reference (PROJECT:DATASET.TABLE or '", "# if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete # the table", "table, etc.). \"\"\" TEMP_TABLE = 'temp_table_' TEMP_DATASET = 'temp_dataset_' def", "= bigquery.BigqueryDatasetsInsertRequest( projectId=project_id, dataset=dataset) response = self.client.datasets.Insert(request) # The response", "create and write dispositions. The function mimics the behavior of", "\"Unable to get location, missing response.statistics. Query: %s\", query) return", "the flatten_results flag as False to the source return self.convert_row_to_dict(value,", "are provided. This situation # can happen during retries on", "fields = [_parse_schema_field(f) for f in json_schema['fields']] return bigquery.TableSchema(fields=fields) def", "it offers various functions used both in sources and sinks", "created_table) # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete # the", "then the fact that we get here means the #", "flatten_results self.kms_key = kms_key if self.source.table_reference is not None: #", "compliance with # the License. 
You may obtain a copy", "the field values in each row but could be useful", "= {'invalid', 'invalidQuery', 'notImplemented'} @staticmethod def should_retry(strategy, error_message): if strategy", "a table or query\") def _get_source_location(self): \"\"\" Get the source", "after the delete and create.') # TODO(BEAM-2673): Remove this sleep", "be a bigquery.TableReference instance in which case dataset and project", "Guards against both non-empty and non-None table = referenced_tables[0] location", "self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder) # Schema for the rows being", "Protect against environments where bigquery library is not available. #", "def get_or_create_table( self, project_id, dataset_id, table_id, schema, create_disposition, write_disposition): \"\"\"Gets", "found table in case the schema was not specified. if", "request = bigquery.BigqueryTabledataListRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, maxResults=1) response = self.client.tabledata.List(request)", "is None: # Ideally this should never happen as repeated", "project are ignored and the reference is returned as a", "field.type == 'INTEGER': # Input: \"123\" --> Output: 123 return", "appending '$YYYYmmdd' to the table name is supported, e.g. 'DATASET.TABLE$YYYYmmdd'.", "table elif callable(table): return table elif isinstance(table, value_provider.ValueProvider): return table", "apache_beam import coders from apache_beam.internal.gcp import auth from apache_beam.internal.gcp.json_value import", "This behavior is only expected in tests logging.warning( \"Unable to", "the Query or the Table. \"\"\" json_schema = json.loads(schema_string) def", "bigquery from apache_beam.options import value_provider from apache_beam.options.pipeline_options import GoogleCloudOptions from", "= bigquery.JobReference() reference.jobId = job_id reference.projectId = project_id request =", "= 0 # For testing scenarios where we pass in", "client we do not want a # randomized prefix for", "exn.status_code == 404: dataset_reference = bigquery.DatasetReference( projectId=project_id, datasetId=dataset_id) dataset =", "response else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _is_table_empty(self, project_id, dataset_id,", "is provided, BigQuery will make a best effort to not", "project_id, job_id, table_reference, source_uris, schema=None, write_disposition=None, create_disposition=None): reference = bigquery.JobReference(jobId=job_id,", "parameters Returns: bigquery.Table instance Raises: HttpError if lookup failed. \"\"\"", "# The ASF licenses this file to You under the", "the repeated and/or record fields are flattened # unless we", "self.rows_buffer: logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer), self.project_id, self.dataset_id,", "\"\"\"BigQuery client wrapper with utilities for querying. The wrapper is", "# required by the InsertAll() method. request = bigquery.BigqueryTabledataInsertAllRequest( projectId=project_id,", "KV pair. Outputs a PCollection of KV-pairs where the key", "to interact with BigQuery APIs. NOTHING IN THIS FILE HAS", "or found_table.schema, created_table) # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete", "field) return result # ----------------------------------------------------------------------------- # BigQueryReader, BigQueryWriter. 
class BigQueryReader(dataflow_io.NativeSourceReader):", "request = bigquery.BigqueryTablesGetRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id) response = self.client.tables.Get(request) return", "tableId=table_id, tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest( skipInvalidRows=skip_invalid_rows, # TODO(silviuc): Should have an option for", "writer for a BigQuerySink.\"\"\" def __init__(self, sink, test_bigquery_client=None, buffer_size=None): self.sink", "for sources and sinks if the coder argument is not", "OF ANY KIND, either express or implied. # See the", "not exist.' % (project_id, dataset_id, table_id)) if found_table and write_disposition", "as we're reading using # utcfromtimestamp. # Input: 1478134176.985864 -->", "cell['v'] if 'v' in cell else None if field.mode ==", "See the License for the specific language governing permissions and", "logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer), self.project_id, self.dataset_id, self.table_id)", "Python dict.\"\"\" result = {} for index, field in enumerate(schema.fields):", "NAN/INF values. try: return json.dumps( table_row, allow_nan=False, default=default_encoder).encode('utf-8') except ValueError", "from_table_reference, to_table_reference, create_disposition=None, write_disposition=None): reference = bigquery.JobReference() reference.jobId = job_id", "if errors encountered. return not response.insertErrors, response.insertErrors @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "in iteritems(row): if isinstance(v, decimal.Decimal): # decimal values are converted", "== 404: dataset_reference = bigquery.DatasetReference( projectId=project_id, datasetId=dataset_id) dataset = bigquery.Dataset(datasetReference=dataset_reference)", "and/or record fields are flattened # unless we pass the", "self.schema = schema for row in rows: if self.row_as_dict: yield", "self.dataset_id = self.sink.table_reference.datasetId self.table_id = self.sink.table_reference.tableId def _flush_rows_buffer(self): if self.rows_buffer:", "a NoOp, because we assume the argument already has #", "to in writing, software # distributed under the License is", "dataset.location = location request = bigquery.BigqueryDatasetsInsertRequest( projectId=project_id, dataset=dataset) response =", "useful for # getting additional details. self.schema = None self.use_legacy_sql", "of KV-pairs where the key is a TableReference for the", "response.pageToken: break page_token = response.pageToken def insert_rows(self, project_id, dataset_id, table_id,", "projectId=project_id) request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( dryRun=dry_run, query=bigquery.JobConfigurationQuery( query=query,", "for the actual execution of the query in the service.", "or agreed to in writing, software # distributed under the", "API endpoint. Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\\ /rest/v2/tabledata/insertAll.\"\"\" #", "data to the old table for 2 mins max so", "1.23 return float(value) elif field.type == 'TIMESTAMP': # The UTC", "= bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( dryRun=dry_run, query=bigquery.JobConfigurationQuery( query=query, useLegacySql=use_legacy_sql, allowLargeResults=True,", "to be empty. 
\"\"\" from apache_beam.io.gcp.bigquery import BigQueryDisposition found_table =", "page_token = None while True: response = self._get_query_results(project_id, job_id, page_token)", "Get the source location (e.g. ``\"EU\"`` or ``\"US\"``) from either", "or - The first referenced table in :data:`source.query` See Also:", "schema in self.client.run_query( project_id=self.executing_project, query=self.query, use_legacy_sql=self.use_legacy_sql, flatten_results=self.flatten_results): if self.schema is", "None if isinstance(schema, bigquery.TableSchema): cell = row.f[index] value = from_json_value(cell.v)", "able to handle # inserts into NUMERIC columns by receiving", "issued several times. This comes into play for sinks executed", "errors def _convert_cell_value_to_dict(self, value, field): if field.type == 'STRING': #", "found but create disposition is CREATE_NEVER.' % (project_id, dataset_id, table_id))", "available. # pylint: disable=wrong-import-order, wrong-import-position try: from apitools.base.py.exceptions import HttpError", "bigquery.JobReference(jobId=job_id, projectId=project_id) request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( load=bigquery.JobConfigurationLoad( sourceUris=source_uris,", "be used as temporary.' % (project_id, dataset_id)) except HttpError as", "dataset_id, table_id, final_rows, skip_invalid_rows) return result, errors def _convert_cell_value_to_dict(self, value,", "project_id, job_id, from_table_reference, to_table_reference, create_disposition=None, write_disposition=None): reference = bigquery.JobReference() reference.jobId", "response.jobReference.jobId @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _get_query_results(self, project_id, job_id, page_token=None, max_results=10000):", "len(self.rows_buffer) > self.rows_buffer_flush_threshold: self._flush_rows_buffer() class RowAsDictJsonCoder(coders.Coder): \"\"\"A coder for a", "into play for sinks executed in a local runner. Returns:", "run_query(self, project_id, query, use_legacy_sql, flatten_results, dry_run=False): job_id = self._start_query_job(project_id, query,", "an option for ignoreUnknownValues? rows=rows)) response = self.client.tabledata.InsertAll(request) # response.insertErrors", "configuration=bigquery.JobConfiguration( copy=bigquery.JobConfigurationTableCopy( destinationTable=to_table_reference, sourceTable=from_table_reference, createDisposition=create_disposition, writeDisposition=write_disposition, ) ), jobReference=reference, )", "of type '%s' is not JSON serializable\" % type(obj).__name__) def", "table_id, rows, skip_invalid_rows=False): \"\"\"Inserts rows into the specified table. Args:", "raise RuntimeError('Unexpected field type: %s' % field.type) def convert_row_to_dict(self, row,", "the InsertAll() method. request = bigquery.BigqueryTabledataInsertAllRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(", "dry_run=False): job_id = self._start_query_job(project_id, query, use_legacy_sql, flatten_results, job_id=uuid.uuid4().hex, dry_run=dry_run) if", "self.source = source self.test_bigquery_client = test_bigquery_client if auth.is_running_in_gce: self.executing_project =", "# BigQuery service. self.rows_buffer = [] self.rows_buffer_flush_threshold = buffer_size or", "endpoint. 
Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\\ /rest/v2/tabledata/insertAll.\"\"\" # The", "'INTEGER': # Input: \"123\" --> Output: 123 return int(value) elif", "flatten_results=True, kms_key=None): self.source = source self.test_bigquery_client = test_bigquery_client if auth.is_running_in_gce:", "that the # table argument will contain a full table", "self.sink.table_reference.tableId def _flush_rows_buffer(self): if self.rows_buffer: logging.info('Writing %d rows to %s:%s.%s", "of the first referenced table in the query and depends", "projectId=project_id, datasetId=dataset_id, deleteContents=delete_contents) try: self.client.datasets.Delete(request) except HttpError as exn: if", "write dispositions. if found_table: table_empty = self._is_table_empty(project_id, dataset_id, table_id) if", "HttpError as exn: if exn.status_code == 404: logging.warning( 'Dataset %s:%s", "table (and possibly dataset) argument. Returns: A TableReference object from", "JSON. Returns: A TableSchema of the BigQuery export from either", "= json.loads(schema_string) def _parse_schema_field(field): \"\"\"Parse a single schema field from", "is that the # table argument will contain a full", "logging.info(\"Inserting job request: %s\", request) response = self.client.jobs.Insert(request) logging.info(\"Response was", "test_bigquery_client=None, use_legacy_sql=True, flatten_results=True, kms_key=None): self.source = source self.test_bigquery_client = test_bigquery_client", "dataset: The ID of the dataset containing this table or", "whether they should be skipped, and all others should be", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table_location(self, project_id, dataset_id, table_id): table =", "format. \"\"\" if isinstance(table, bigquery.TableReference): return table elif callable(table): return", "under the Apache License, Version 2.0 # (the \"License\"); you", "= referenced_tables[0] location = self.get_table_location( table.projectId, table.datasetId, table.tableId) logging.info(\"Using location", "table = bigquery.Table( tableReference=bigquery.TableReference( projectId=project_id, datasetId=dataset_id, tableId=table_id), schema=schema) request =", "a bigquery.InserttErrorsValueListEntry instance containing specific errors. \"\"\" # Prepare rows", "if dataset is None: match = re.match( r'^((?P<project>.+):)?(?P<dataset>\\w+)\\.(?P<table>[\\w\\$]+)$', table) if", "schema.fields = [_parse_schema_field(x) for x in field['fields']] return schema fields", "successfully. Returns: A tuple (bool, errors). If first element is", "self.source.query is not None: self.query = self.source.query else: # Enforce", "contain the entire table reference: 'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument", "TEMP_DATASET = 'temp_dataset_' def __init__(self, client=None): self.client = client or", "the destination containing 'PROJECT:DATASET.TABLE'. Returns: A string representing the destination", "self._temporary_table_suffix = uuid.uuid4().hex @property def unique_row_id(self): \"\"\"Returns a unique row", "service. If # the request times out we keep trying.", "the same row multiple times for fail and retry scenarios", "import value_provider from apache_beam.options.pipeline_options import GoogleCloudOptions from apache_beam.runners.dataflow.native_io import iobase", "def parse_table_schema_from_json(schema_string): \"\"\"Parse the Table Schema provided as string. 
Args:", "This code will catch this error to emit an error", "by migrating to load api time.sleep(150) return created_table else: return", "either - :data:`source.table_reference` or - The first referenced table in", "you may not use this file except in compliance with", "apache_beam.transforms import DoFn from apache_beam.utils import retry # Protect against", "self.client.clean_up_temporary_dataset(self.executing_project) def __iter__(self): for rows, schema in self.client.run_query( project_id=self.executing_project, query=self.query,", "temp_table.datasetId) return else: raise self._delete_dataset(temp_table.projectId, temp_table.datasetId, True) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "datasetId=dataset_id)) return dataset except HttpError as exn: if exn.status_code ==", "a bigquery.TableDataList instance. return response.totalRows == 0 @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "tr = self.source.table_reference return self.client.get_table_location( tr.projectId if tr.projectId is not", "the License. You may obtain a copy of the License", "the expectation is that the # table argument will contain", "into BigQuery. Returns: bigquery.JobReference with the information about the job", "location=location) else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def clean_up_temporary_dataset(self, project_id): temp_table", "= None while True: response = self._get_query_results(project_id, job_id, page_token) if", "self.rows_buffer = [] self.rows_buffer_flush_threshold = buffer_size or 1000 # Figure", "if not passed: raise RuntimeError('Could not successfully insert rows to", "using the same create and write dispositions. Args: project_id: The", "return value elif field.type == 'RECORD': # Note that a", "TODO(silviuc): Should have an option for ignoreUnknownValues? rows=rows)) response =", "Check if dataset exists to make sure that the temporary", "None # TODO(silviuc): Try to automatically get it from gcloud", "try: self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest( projectId=project_id, datasetId=temp_table.datasetId)) except HttpError as exn: if exn.status_code", "job_id request.projectId = project request.location = location return self.client.jobs.Get(request) def", "number of rows. logging.info('Waiting on response from query: %s ...',", "location (e.g. ``\"EU\"`` or ``\"US\"``) from either - :data:`source.table_reference` or", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "return schema fields = [_parse_schema_field(f) for f in json_schema['fields']] return", "coders from apache_beam.internal.gcp import auth from apache_beam.internal.gcp.json_value import from_json_value from", "a table reference (PROJECT:DATASET.TABLE or ' 'DATASET.TABLE) instead of %s.'", "if schema is None and found_table is None: raise RuntimeError(", "client: bigquery.BigqueryV2 instance project_id, dataset_id, table_id: table lookup parameters Returns:", "with string attrs. v = str(v) json_object.additionalProperties.append( bigquery.JsonObject.AdditionalProperty( key=k, value=to_json_value(v)))", "table_row, allow_nan=False, default=default_encoder).encode('utf-8') except ValueError as e: raise ValueError('%s. %s'", "rows being read by the reader. 
It is initialized the", "fact that we get here means the # query has", "self.source.table_reference.projectId if not project_id: project_id = self.executing_project self.query = 'SELECT", "__enter__(self): self.client = BigQueryWrapper(client=self.test_bigquery_client) self.client.create_temporary_dataset( self.executing_project, location=self._get_source_location()) return self def", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _delete_dataset(self, project_id, dataset_id, delete_contents=True): request =", "required by the InsertAll() method. request = bigquery.BigqueryTabledataInsertAllRequest( projectId=project_id, datasetId=dataset_id,", "an error will be raised since the table was expected", "with utilities for querying. The wrapper is used to organize", "error_message not in RetryStrategy._NON_TRANSIENT_ERRORS): return True else: return False class", "If there are rows with insertion errors, whether they should", "argument must contain the entire table reference: 'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'.", "containing specific errors. \"\"\" # Prepare rows for insertion. Of", "The first referenced table in :data:`source.query` See Also: - :meth:`BigQueryWrapper.get_query_location`", "values is: # ValueError: Out of range float values are", "exn.status_code == 404: logging.warning('Dataset %s:%s does not exist', project_id, dataset_id)", "'for 2 mins after the delete and create.') # TODO(BEAM-2673):", "[] if errors encountered. return not response.insertErrors, response.insertErrors @retry.with_exponential_backoff( num_retries=MAX_RETRIES,", "retries on failures. # TODO(silviuc): Must add support to writing", "page_token = response.pageToken def insert_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False):", "exn.status_code == 404: logging.warning('Dataset %s:%s does not exist', project_id, temp_table.datasetId)", "compliant.' def default_encoder(obj): if isinstance(obj, decimal.Decimal): return str(obj) raise TypeError(", "logging.warning( \"Unable to get location, missing response.statistics. Query: %s\", query)", "return json.dumps( table_row, allow_nan=False, default=default_encoder).encode('utf-8') except ValueError as e: raise", "lookup failed. \"\"\" request = bigquery.BigqueryTablesGetRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id) response", "a # table name. if dataset is None: match =", "interact with BigQuery APIs. NOTHING IN THIS FILE HAS BACKWARDS", "table.location @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def create_temporary_dataset(self, project_id, location): dataset_id =", "Buffer used to batch written rows so we reduce communication", "dispositions. Args: project_id: The project id owning the table. dataset_id:", "exception_type, exception_value, traceback): self._flush_rows_buffer() def Write(self, row): self.rows_buffer.append(row) if len(self.rows_buffer)", "# support the precision that decimal supports. 
def parse_table_schema_from_json(schema_string):
  """Parse the Table Schema provided as string.

  Args:
    schema_string: String serialized table schema, should be a valid JSON.

  Returns:
    A TableSchema of the BigQuery export from either the Query or the Table.
  """
  json_schema = json.loads(schema_string)

  def _parse_schema_field(field):
    """Parse a single schema field from dictionary.

    Args:
      field: Dictionary object containing serialized schema.

    Returns:
      A TableFieldSchema for a single column in BigQuery.
    """
    schema = bigquery.TableFieldSchema()
    schema.name = field['name']
    schema.type = field['type']
    if 'mode' in field:
      schema.mode = field['mode']
    else:
      schema.mode = 'NULLABLE'
    if 'description' in field:
      schema.description = field['description']
    if 'fields' in field:
      schema.fields = [_parse_schema_field(x) for x in field['fields']]
    return schema

  fields = [_parse_schema_field(f) for f in json_schema['fields']]
  return bigquery.TableSchema(fields=fields)


def parse_table_reference(table, dataset=None, project=None):
  """Parses a table reference into a (project, dataset, table) tuple.

  Args:
    table: The ID of the table. The ID must contain only letters
      (a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is
      None then the table argument must contain the entire table reference:
      'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
      bigquery.TableReference instance in which case dataset and project are
      ignored and the reference is returned as a result.  Additionally, for
      date partitioned tables, appending '$YYYYmmdd' to the table name is
      supported, e.g. 'DATASET.TABLE$YYYYmmdd'.
    dataset: The ID of the dataset containing this table or null if the table
      reference is specified entirely by the table argument.
    project: The ID of the project containing this table or null if the table
      reference is specified entirely by the table (and possibly dataset)
      argument.

  Returns:
    A TableReference object from the bigquery API. The object has the
    following attributes: projectId, datasetId, and tableId.

  Raises:
    ValueError: if the table reference as a string does not match the expected
      format.
  """
  if isinstance(table, bigquery.TableReference):
    return table
  elif callable(table):
    return table
  elif isinstance(table, value_provider.ValueProvider):
    return table

  table_reference = bigquery.TableReference()
  # If dataset argument is not specified, the expectation is that the
  # table argument will contain a full table reference instead of just a
  # table name.
  if dataset is None:
    match = re.match(
        r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
    if not match:
      raise ValueError(
          'Expected a table reference (PROJECT:DATASET.TABLE or '
          'DATASET.TABLE) instead of %s.' % table)
    table_reference.projectId = match.group('project')
    table_reference.datasetId = match.group('dataset')
    table_reference.tableId = match.group('table')
  else:
    table_reference.projectId = project
    table_reference.datasetId = dataset
    table_reference.tableId = table
  return table_reference
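# Illustrative sketch (not part of the original module): parse_table_reference
# splits a fully qualified table string; the names are hypothetical.
def _example_parse_table_reference():
  ref = parse_table_reference('my-project:my_dataset.my_table')
  assert (ref.projectId, ref.datasetId, ref.tableId) == (
      'my-project', 'my_dataset', 'my_table')
  # Without a project prefix, projectId stays None.
  ref = parse_table_reference('my_dataset.my_table')
  assert ref.projectId is None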
# -----------------------------------------------------------------------------
# BigQueryWrapper.


class BigQueryWrapper(object):
  """BigQuery client wrapper with utilities for querying.

  The wrapper is used to organize all the BigQuery integration points and
  offer a common place where retry logic for failures can be controlled.
  In addition it offers various functions used both in sources and sinks
  (e.g., find and create tables, query a table, etc.).
  """

  TEMP_TABLE = 'temp_table_'
  TEMP_DATASET = 'temp_dataset_'

  def __init__(self, client=None):
    self.client = client or bigquery.BigqueryV2(
        http=get_new_http(),
        credentials=auth.get_service_credentials(),
        response_encoding=None if sys.version_info[0] < 3 else 'utf8')
    self._unique_row_id = 0
    # For testing scenarios where we pass in a client we do not want a
    # randomized prefix for row IDs.
    self._row_id_prefix = '' if client else uuid.uuid4()
    self._temporary_table_suffix = uuid.uuid4().hex

  @property
  def unique_row_id(self):
    """Returns a unique row ID (str) used to avoid multiple insertions.

    If the row ID is provided, BigQuery will make a best effort to not insert
    the same row multiple times for fail and retry scenarios in which the
    insert request may be issued several times. This comes into play for
    sinks executed in a local runner.

    Returns:
      a unique row ID string
    """
    self._unique_row_id += 1
    return '%s_%d' % (self._row_id_prefix, self._unique_row_id)

  def _get_temp_table(self, project_id):
    return parse_table_reference(
        table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
        dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
        project=project_id)
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_query_location(self, project_id, query, use_legacy_sql):
    """Get the location of tables referenced in a query.

    This method returns the location of the first referenced table in the
    query and depends on the BigQuery service to provide error handling for
    queries that reference tables in multiple locations.
    """
    reference = bigquery.JobReference(jobId=uuid.uuid4().hex,
                                      projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                dryRun=True,
                query=bigquery.JobConfigurationQuery(
                    query=query,
                    useLegacySql=use_legacy_sql,
                )),
            jobReference=reference))

    response = self.client.jobs.Insert(request)

    if response.statistics is None:
      # This behavior is only expected in tests
      logging.warning(
          "Unable to get location, missing response.statistics. Query: %s",
          query)
      return None

    referenced_tables = response.statistics.query.referencedTables
    if referenced_tables:  # Guards against both non-empty and non-None
      table = referenced_tables[0]
      location = self.get_table_location(
          table.projectId,
          table.datasetId,
          table.tableId)
      logging.info("Using location %r from table %r referenced by query %s",
                   location, table, query)
      return location

    logging.debug("Query %s does not reference any tables.", query)
    return None

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _insert_copy_job(self,
                       project_id,
                       job_id,
                       from_table_reference,
                       to_table_reference,
                       create_disposition=None,
                       write_disposition=None):
    reference = bigquery.JobReference()
    reference.jobId = job_id
    reference.projectId = project_id
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                copy=bigquery.JobConfigurationTableCopy(
                    destinationTable=to_table_reference,
                    sourceTable=from_table_reference,
                    createDisposition=create_disposition,
                    writeDisposition=write_disposition,
                )
            ),
            jobReference=reference,
        )
    )

    logging.info("Inserting job request: %s", request)
    response = self.client.jobs.Insert(request)
    logging.info("Response was %s", response)
    return response.jobReference
\"\"\" if isinstance(table, bigquery.TableReference): return table elif callable(table):", "----------------------------------------------------------------------------- # BigQueryWrapper. class BigQueryWrapper(object): \"\"\"BigQuery client wrapper with utilities", "def get_job(self, project, job_id, location=None): request = bigquery.BigqueryJobsGetRequest() request.jobId =", "datasetId=dataset_id, tableId=table_id, maxResults=1) response = self.client.tabledata.List(request) # The response is", "avoid inserting a row multiple times. # BigQuery will do", "(project_id, dataset_id, table_id)) # Delete the table and recreate it", "The table id. schema: A bigquery.TableSchema instance or None. create_disposition:", "UTC\" dt = datetime.datetime.utcfromtimestamp(float(value)) return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC') elif field.type", "# first time something gets read from the table. It", "It is initialized the # first time something gets read", "row but could be useful for # getting additional details.", "missing pageToken. yield response.rows, response.schema if not response.pageToken: break page_token", "the Table Schema provided as string. Args: schema_string: String serialized", "unless we pass the flatten_results flag as False to the", "be updated as well. raise ValueError(\"BigQuerySource must have either a", "return value_provider.StaticValueProvider(lambda x: x, value=elm) @staticmethod def _get_table_fn(destination): if callable(destination):", "bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( dryRun=True, query=bigquery.JobConfigurationQuery( query=query, useLegacySql=use_legacy_sql, )), jobReference=reference))", "to get location, missing response.statistics. Query: %s\", query) return None", "= str(v) json_object.additionalProperties.append( bigquery.JsonObject.AdditionalProperty( key=k, value=to_json_value(v))) final_rows.append( bigquery.TableDataInsertAllRequest.RowsValueListEntry( insertId=str(self.unique_row_id), json=json_object))", "2 mins after the delete and create.') # TODO(BEAM-2673): Remove", "bigquery.InserttErrorsValueListEntry instance containing specific errors. \"\"\" # Prepare rows for", "\"\"\"Starts a job to load data into BigQuery. Returns: bigquery.JobReference", "\"00:49:36\" --> Output: \"00:49:36\" return value elif field.type == 'RECORD':", "= test_bigquery_client self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder) # Buffer used to", "dataset_id, table_id)) else: raise # If table exists already then", "self.client.tabledata.List(request) # The response is a bigquery.TableDataList instance. return response.totalRows", "return created_table else: return created_table def run_query(self, project_id, query, use_legacy_sql,", "= row.f[index] value = from_json_value(cell.v) if cell.v is not None", "self.source.table_reference is not None: tr = self.source.table_reference return self.client.get_table_location( tr.projectId", "datasetId=dataset_id, deleteContents=delete_contents) try: self.client.datasets.Delete(request) except HttpError as exn: if exn.status_code", "uuid.uuid4() self._temporary_table_suffix = uuid.uuid4().hex @property def unique_row_id(self): \"\"\"Returns a unique", "rows to BigQuery' ' table [%s:%s.%s]. Errors: %s' % (self.project_id,", "else: self.executing_project = None # TODO(silviuc): Try to automatically get", "rows, skip_invalid_rows=False): \"\"\"Calls the insertAll BigQuery API endpoint. 
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter)
  def _insert_all_rows(self, project_id, dataset_id, table_id, rows,
                       skip_invalid_rows=False):
    """Calls the insertAll BigQuery API endpoint.

    Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\
      /rest/v2/tabledata/insertAll."""
    # The rows argument is a list of
    # bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as
    # required by the InsertAll() method.
    request = bigquery.BigqueryTabledataInsertAllRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id,
        tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
            skipInvalidRows=skip_invalid_rows,
            # TODO(silviuc): Should have an option for ignoreUnknownValues?
            rows=rows))
    response = self.client.tabledata.InsertAll(request)
    # response.insertErrors is not [] if errors encountered.
    return not response.insertErrors, response.insertErrors

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table(self, project_id, dataset_id, table_id):
    """Lookup a table's metadata object.

    Args:
      project_id, dataset_id, table_id: table lookup parameters

    Returns:
      bigquery.Table instance
    Raises:
      HttpError if lookup failed.
    """
    request = bigquery.BigqueryTablesGetRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    response = self.client.tables.Get(request)
    return response

  def _create_table(self, project_id, dataset_id, table_id, schema):
    table = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId=project_id, datasetId=dataset_id, tableId=table_id),
        schema=schema)
    request = bigquery.BigqueryTablesInsertRequest(
        projectId=project_id, datasetId=dataset_id, table=table)
    response = self.client.tables.Insert(request)
    logging.debug("Created the table with id %s", table_id)
    # The response is a bigquery.Table instance.
    return response
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_dataset(self, project_id, dataset_id, location=None):
    # Check if dataset already exists otherwise create it
    try:
      dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      return dataset
    except HttpError as exn:
      if exn.status_code == 404:
        dataset_reference = bigquery.DatasetReference(
            projectId=project_id, datasetId=dataset_id)
        dataset = bigquery.Dataset(datasetReference=dataset_reference)
        if location is not None:
          dataset.location = location
        request = bigquery.BigqueryDatasetsInsertRequest(
            projectId=project_id, dataset=dataset)
        response = self.client.datasets.Insert(request)
        # The response is a bigquery.Dataset instance.
        return response
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _is_table_empty(self, project_id, dataset_id, table_id):
    request = bigquery.BigqueryTabledataListRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id,
        maxResults=1)
    response = self.client.tabledata.List(request)
    # The response is a bigquery.TableDataList instance.
    return response.totalRows == 0

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _delete_table(self, project_id, dataset_id, table_id):
    request = bigquery.BigqueryTablesDeleteRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    try:
      self.client.tables.Delete(request)
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Table %s:%s.%s does not exist', project_id,
                        dataset_id, table_id)
        return
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
    request = bigquery.BigqueryDatasetsDeleteRequest(
        projectId=project_id, datasetId=dataset_id,
        deleteContents=delete_contents)
    try:
      self.client.datasets.Delete(request)
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Dataset %s:%s does not exist', project_id,
                        dataset_id)
        return
      else:
        raise
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table_location(self, project_id, dataset_id, table_id):
    table = self.get_table(project_id, dataset_id, table_id)
    return table.location

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def create_temporary_dataset(self, project_id, location):
    dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
    # Check if dataset exists to make sure that the temporary id is unique
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      if project_id is not None:
        # Unittests don't pass projectIds so they can be run without error
        raise RuntimeError(
            'Dataset %s:%s already exists so cannot be used as temporary.'
            % (project_id, dataset_id))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning(
            'Dataset %s:%s does not exist so we will create it as temporary '
            'with location=%s', project_id, dataset_id, location)
        self.get_or_create_dataset(project_id, dataset_id, location=location)
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def clean_up_temporary_dataset(self, project_id):
    temp_table = self._get_temp_table(project_id)
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=temp_table.datasetId))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Dataset %s:%s does not exist', project_id,
                        temp_table.datasetId)
        return
      else:
        raise
    self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_job(self, project, job_id, location=None):
    request = bigquery.BigqueryJobsGetRequest()
    request.jobId = job_id
    request.projectId = project
    request.location = location

    return self.client.jobs.Get(request)

  def perform_load_job(self,
                       destination,
                       files,
                       job_id,
                       schema=None,
                       write_disposition=None,
                       create_disposition=None):
    """Starts a job to load data into BigQuery.

    Returns:
      bigquery.JobReference with the information about the job that was
      started.
    """
    return self._insert_load_job(
        destination.projectId, job_id, destination, files,
        schema=schema,
        create_disposition=create_disposition,
        write_disposition=write_disposition)
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_table(
      self, project_id, dataset_id, table_id, schema,
      create_disposition, write_disposition):
    """Gets or creates a table based on create and write dispositions.

    The function mimics the behavior of BigQuery import jobs when using the
    same create and write dispositions.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      schema: A bigquery.TableSchema instance or None.
      create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
      write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.

    Returns:
      A bigquery.Table instance if table was found or created.

    Raises:
      RuntimeError: For various mismatches between the state of the table and
        the create/write dispositions passed in. For example if the table is
        not empty and WRITE_EMPTY was specified then an error will be raised
        since the table was expected to be empty.
    """
    from apache_beam.io.gcp.bigquery import BigQueryDisposition

    found_table = None
    try:
      found_table = self.get_table(project_id, dataset_id, table_id)
    except HttpError as exn:
      if exn.status_code == 404:
        if create_disposition == BigQueryDisposition.CREATE_NEVER:
          raise RuntimeError(
              'Table %s:%s.%s not found but create disposition is '
              'CREATE_NEVER.' % (project_id, dataset_id, table_id))
      else:
        raise

    # If table exists already then handle the semantics for WRITE_EMPTY and
    # WRITE_TRUNCATE write dispositions.
    if found_table:
      table_empty = self._is_table_empty(project_id, dataset_id, table_id)
      if (not table_empty and
          write_disposition == BigQueryDisposition.WRITE_EMPTY):
        raise RuntimeError(
            'Table %s:%s.%s is not empty but write disposition is '
            'WRITE_EMPTY.' % (project_id, dataset_id, table_id))
      # Delete the table and recreate it (later) if WRITE_TRUNCATE was
      # specified.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        self._delete_table(project_id, dataset_id, table_id)

    # Create a new table potentially reusing the schema from a previously
    # found table in case the schema was not specified.
    if schema is None and found_table is None:
      raise RuntimeError(
          'Table %s:%s.%s requires a schema. None can be inferred because the '
          'table does not exist.' % (project_id, dataset_id, table_id))
    if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
      return found_table
    else:
      created_table = self._create_table(project_id=project_id,
                                         dataset_id=dataset_id,
                                         table_id=table_id,
                                         schema=schema or found_table.schema)
      logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
                   project_id, dataset_id, table_id,
                   schema or found_table.schema,
                   created_table)
      # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
      # the table before this point.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        # BigQuery can route data to the old table for 2 mins max so wait
        # that much time before creating the table and writing it
        logging.warning('Sleeping for 150 seconds before the write as ' +
                        'BigQuery inserts can be routed to deleted table ' +
                        'for 2 mins after the delete and create.')
        # TODO(BEAM-2673): Remove this sleep by migrating to load api
        time.sleep(150)
        return created_table
      else:
        return created_table
  def run_query(self, project_id, query, use_legacy_sql, flatten_results,
                dry_run=False):
    job_id = self._start_query_job(project_id, query, use_legacy_sql,
                                   flatten_results, job_id=uuid.uuid4().hex,
                                   dry_run=dry_run)
    if dry_run:
      # If this was a dry run then the fact that we get here means the
      # query has no errors. The start_query_job would raise an error
      # otherwise.
      return
    page_token = None
    while True:
      response = self._get_query_results(project_id, job_id, page_token)
      if not response.jobComplete:
        # The jobComplete field can be False if the query request times out
        # waiting for results to be available in the service. Note that this
        # is a timeout for the query request not for the actual execution of
        # the query in the service. If the request times out we keep trying.
        # This situation is quite possible if the query will return a large
        # number of rows.
        logging.info('Waiting on response from query: %s ...', query)
        time.sleep(1.0)
        continue
      # We got some results. The last page is signalled by a missing
      # pageToken.
      yield response.rows, response.schema
      if not response.pageToken:
        break
      page_token = response.pageToken
  def insert_rows(self, project_id, dataset_id, table_id, rows,
                  skip_invalid_rows=False):
    """Inserts rows into the specified table.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      rows: A list of plain Python dictionaries. Each dictionary is a row and
        each key in it is the name of a field.
      skip_invalid_rows: If there are rows with insertion errors, whether they
        should be skipped, and all others should be inserted successfully.

    Returns:
      A tuple (bool, errors). If first element is False then the second
      element will be a bigquery.InsertErrorsValueListEntry instance
      containing specific errors.
    """

    # Prepare rows for insertion. Of special note is the row ID that we add to
    # each row in order to help BigQuery avoid inserting a row multiple times.
    # BigQuery will do a best-effort if unique IDs are provided. This situation
    # can happen during retries on failures.
    # TODO(silviuc): Must add support to writing TableRow's instead of dicts.
    final_rows = []
    for row in rows:
      json_object = bigquery.JsonObject()
      for k, v in iteritems(row):
        if isinstance(v, decimal.Decimal):
          # decimal values are converted into string because JSON does not
          # support the precision that decimal supports. BQ is able to handle
          # inserts into NUMERIC columns by receiving value with string attrs.
          v = str(v)
        json_object.additionalProperties.append(
            bigquery.JsonObject.AdditionalProperty(
                key=k, value=to_json_value(v)))
      final_rows.append(
          bigquery.TableDataInsertAllRequest.RowsValueListEntry(
              insertId=str(self.unique_row_id),
              json=json_object))
    result, errors = self._insert_all_rows(
        project_id, dataset_id, table_id, final_rows, skip_invalid_rows)
    return result, errors
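  # Illustrative usage sketch (added commentary with hypothetical names, not
  # part of the original module): each dict becomes one tagged row; the
  # returned errors list is empty on full success:
  #
  #   passed, errors = wrapper.insert_rows(
  #       'my-project', 'my_dataset', 'my_table',
  #       [{'name': 'a', 'age': 1}, {'name': 'b', 'age': 2}])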
  def _convert_cell_value_to_dict(self, value, field):
    if field.type == 'STRING':
      # Input: "XYZ" --> Output: "XYZ"
      return value
    elif field.type == 'BOOLEAN':
      # Input: "true" --> Output: True
      return value == 'true'
    elif field.type == 'INTEGER':
      # Input: "123" --> Output: 123
      return int(value)
    elif field.type == 'FLOAT':
      # Input: "1.23" --> Output: 1.23
      return float(value)
    elif field.type == 'TIMESTAMP':
      # The UTC should come from the timezone library but this is a known
      # issue in python 2.7 so we'll just hardcode it as we're reading using
      # utcfromtimestamp.
      # Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
      dt = datetime.datetime.utcfromtimestamp(float(value))
      return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
    elif field.type == 'BYTES':
      # Input: "YmJi" --> Output: "YmJi"
      return value
    elif field.type == 'DATE':
      # Input: "2016-11-03" --> Output: "2016-11-03"
      return value
    elif field.type == 'DATETIME':
      # Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
      return value
    elif field.type == 'TIME':
      # Input: "00:49:36" --> Output: "00:49:36"
      return value
    elif field.type == 'RECORD':
      # Note that a schema field object supports also a RECORD type. However
      # when querying, the repeated and/or record fields are flattened
      # unless we pass the flatten_results flag as False to the source
      return self.convert_row_to_dict(value, field)
    elif field.type == 'NUMERIC':
      return decimal.Decimal(value)
    elif field.type == 'GEOGRAPHY':
      return value
    else:
      raise RuntimeError('Unexpected field type: %s' % field.type)
  def convert_row_to_dict(self, row, schema):
    """Converts a TableRow instance using the schema to a Python dict."""
    result = {}
    for index, field in enumerate(schema.fields):
      value = None
      if isinstance(schema, bigquery.TableSchema):
        cell = row.f[index]
        value = from_json_value(cell.v) if cell.v is not None else None
      elif isinstance(schema, bigquery.TableFieldSchema):
        cell = row['f'][index]
        value = cell['v'] if 'v' in cell else None
      if field.mode == 'REPEATED':
        if value is None:
          # Ideally this should never happen as repeated fields default to
          # returning an empty list
          result[field.name] = []
        else:
          result[field.name] = [self._convert_cell_value_to_dict(x['v'], field)
                                for x in value]
      elif value is None:
        if not field.mode == 'NULLABLE':
          raise ValueError('Received \'None\' as the value for the field %s '
                           'but the field is not NULLABLE.' % field.name)
        result[field.name] = None
      else:
        result[field.name] = self._convert_cell_value_to_dict(value, field)
    return result
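# Illustrative sketch (not part of the original module): convert_row_to_dict
# is a pure function over apitools messages, so it can be exercised without a
# BigQuery service; the column name below is hypothetical.
def _example_convert_row_to_dict():
  schema = bigquery.TableSchema(fields=[
      bigquery.TableFieldSchema(name='age', type='INTEGER', mode='NULLABLE')])
  row = bigquery.TableRow(f=[bigquery.TableCell(v=to_json_value('42'))])
  wrapper = BigQueryWrapper(client=object())  # no requests are issued here
  assert wrapper.convert_row_to_dict(row, schema) == {'age': 42}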
# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.


class BigQueryReader(dataflow_io.NativeSourceReader):
  """A reader for a BigQuery source."""

  def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
               flatten_results=True, kms_key=None):
    self.source = source
    self.test_bigquery_client = test_bigquery_client
    if auth.is_running_in_gce:
      self.executing_project = auth.executing_project
    elif hasattr(source, 'pipeline_options'):
      self.executing_project = (
          source.pipeline_options.view_as(GoogleCloudOptions).project)
    else:
      self.executing_project = None

    # TODO(silviuc): Try to automatically get it from gcloud config info.
    if not self.executing_project and test_bigquery_client is None:
      raise RuntimeError(
          'Missing executing project information. Please use the --project '
          'command line option to specify it.')
    self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
    # Schema for the rows being read by the reader. It is initialized the
    # first time something gets read from the table. It is not required
    # for reading the field values in each row but could be useful for
    # getting additional details.
    self.schema = None
    self.use_legacy_sql = use_legacy_sql
    self.flatten_results = flatten_results
    self.kms_key = kms_key

    if self.source.table_reference is not None:
      # If table schema did not define a project we default to executing
      # project.
      project_id = self.source.table_reference.projectId
      if not project_id:
        project_id = self.executing_project
      self.query = 'SELECT * FROM [%s:%s.%s];' % (
          project_id,
          self.source.table_reference.datasetId,
          self.source.table_reference.tableId)
    elif self.source.query is not None:
      self.query = self.source.query
    else:
      # Enforce the "modes" enforced by BigQuerySource.__init__.
      # If this exception has been raised, the BigQuerySource "modes" have
      # changed and this method will need to be updated as well.
      raise ValueError("BigQuerySource must have either a table or query")

  def _get_source_location(self):
    """
    Get the source location (e.g. ``"EU"`` or ``"US"``) from either

    - :data:`source.table_reference`
      or
    - The first referenced table in :data:`source.query`

    See Also:
      - :meth:`BigQueryWrapper.get_query_location`
      - :meth:`BigQueryWrapper.get_table_location`

    Returns:
      Optional[str]: The source location, if any.
    """
    if self.source.table_reference is not None:
      tr = self.source.table_reference
      return self.client.get_table_location(
          tr.projectId if tr.projectId is not None else self.executing_project,
          tr.datasetId, tr.tableId)
    else:  # It's a query source
      return self.client.get_query_location(
          self.executing_project,
          self.source.query,
          self.source.use_legacy_sql)

  def __enter__(self):
    self.client = BigQueryWrapper(client=self.test_bigquery_client)
    self.client.create_temporary_dataset(
        self.executing_project, location=self._get_source_location())
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self.client.clean_up_temporary_dataset(self.executing_project)

  def __iter__(self):
    for rows, schema in self.client.run_query(
        project_id=self.executing_project, query=self.query,
        use_legacy_sql=self.use_legacy_sql,
        flatten_results=self.flatten_results):
      if self.schema is None:
        self.schema = schema
      for row in rows:
        if self.row_as_dict:
          yield self.client.convert_row_to_dict(row, schema)
        else:
          yield row
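# Illustrative sketch (not part of the original module): a reader is used as a
# context manager so its temporary dataset is created and cleaned up; running
# this for real requires GCP credentials and a configured BigQuerySource.
def _example_read(source):
  with BigQueryReader(source) as reader:
    for row in reader:
      logging.debug('Read row: %s', row)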
class BigQueryWriter(dataflow_io.NativeSinkWriter):
  """The sink writer for a BigQuerySink."""

  def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
    self.sink = sink
    self.test_bigquery_client = test_bigquery_client
    self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
    # Buffer used to batch written rows so we reduce communication with the
    # BigQuery service.
    self.rows_buffer = []
    self.rows_buffer_flush_threshold = buffer_size or 1000
    # Figure out the project, dataset, and table used for the sink.
    self.project_id = self.sink.table_reference.projectId

    # If table schema did not define a project we default to executing
    # project.
    if self.project_id is None and hasattr(sink, 'pipeline_options'):
      self.project_id = (
          sink.pipeline_options.view_as(GoogleCloudOptions).project)

    assert self.project_id is not None

    self.dataset_id = self.sink.table_reference.datasetId
    self.table_id = self.sink.table_reference.tableId

  def _flush_rows_buffer(self):
    if self.rows_buffer:
      logging.info('Writing %d rows to %s:%s.%s table.', len(self.rows_buffer),
                   self.project_id, self.dataset_id, self.table_id)
      passed, errors = self.client.insert_rows(
          project_id=self.project_id, dataset_id=self.dataset_id,
          table_id=self.table_id, rows=self.rows_buffer)
      self.rows_buffer = []
      if not passed:
        raise RuntimeError('Could not successfully insert rows to BigQuery'
                           ' table [%s:%s.%s]. Errors: %s' %
                           (self.project_id, self.dataset_id,
                            self.table_id, errors))

  def __enter__(self):
    self.client = BigQueryWrapper(client=self.test_bigquery_client)
    self.client.get_or_create_table(
        self.project_id, self.dataset_id, self.table_id,
        self.sink.table_schema, self.sink.create_disposition,
        self.sink.write_disposition)
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self._flush_rows_buffer()

  def Write(self, row):
    self.rows_buffer.append(row)
    if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
      self._flush_rows_buffer()
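# Illustrative sketch (not part of the original module): rows are buffered and
# flushed every rows_buffer_flush_threshold rows, with a final flush on exit;
# running this for real requires GCP credentials and a configured BigQuerySink.
def _example_write(sink, rows):
  with BigQueryWriter(sink, buffer_size=500) as writer:
    for row in rows:
      writer.Write(row)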
def parse_table_schema_from_json(schema_string):
  """Parse the Table Schema provided as string.

  Args:
    schema_string: String serialized table schema, should be a valid JSON.

  Returns:
    A TableSchema of the BigQuery export from either the Query or the Table.
  """
  json_schema = json.loads(schema_string)

  def _parse_schema_field(field):
    """Parse a single schema field from dictionary.

    Args:
      field: Dictionary object containing serialized schema.

    Returns:
      A TableFieldSchema for a single column in BigQuery.
    """
    schema = bigquery.TableFieldSchema()
    schema.name = field['name']
    schema.type = field['type']
    if 'mode' in field:
      schema.mode = field['mode']
    else:
      schema.mode = 'NULLABLE'
    if 'description' in field:
      schema.description = field['description']
    if 'fields' in field:
      schema.fields = [_parse_schema_field(x) for x in field['fields']]
    return schema

  fields = [_parse_schema_field(f) for f in json_schema['fields']]
  return bigquery.TableSchema(fields=fields)
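# Example sketch (illustrative JSON, assumed field names): parsing a
# two-column schema; a field without an explicit mode defaults to 'NULLABLE'.
#
#   schema = parse_table_schema_from_json(
#       '{"fields": [{"name": "word", "type": "STRING", "mode": "REQUIRED"},'
#       ' {"name": "count", "type": "INTEGER"}]}')
#   assert schema.fields[1].mode == 'NULLABLE'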
def parse_table_reference(table, dataset=None, project=None):
  """Parses a table reference into a (project, dataset, table) tuple.

  Args:
    table: The ID of the table. The ID must contain only letters (a-z, A-Z),
      numbers (0-9), or underscores (_). If dataset argument is None then the
      table argument must contain the entire table reference:
      'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
      bigquery.TableReference instance in which case dataset and project are
      ignored and the reference is returned as a result. Additionally, for
      date partitioned tables, appending '$YYYYmmdd' to the table name is
      supported, e.g. 'DATASET.TABLE$YYYYmmdd'.
    dataset: The ID of the dataset containing this table or null if the table
      reference is specified entirely by the table argument.
    project: The ID of the project containing this table or null if the table
      reference is specified entirely by the table (and possibly dataset)
      argument.

  Returns:
    A TableReference object from the bigquery API. The object has the
    following attributes: projectId, datasetId, and tableId.

  Raises:
    ValueError: if the table reference as a string does not match the
      expected format.
  """
  if isinstance(table, bigquery.TableReference):
    return table
  elif callable(table):
    return table
  elif isinstance(table, value_provider.ValueProvider):
    return table

  table_reference = bigquery.TableReference()
  # If dataset argument is not specified, the expectation is that the
  # table argument will contain a full table reference instead of just a
  # table name.
  if dataset is None:
    match = re.match(
        r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
    if not match:
      raise ValueError(
          'Expected a table reference (PROJECT:DATASET.TABLE or '
          'DATASET.TABLE) instead of %s.' % table)
    table_reference.projectId = match.group('project')
    table_reference.datasetId = match.group('dataset')
    table_reference.tableId = match.group('table')
  else:
    table_reference.projectId = project
    table_reference.datasetId = dataset
    table_reference.tableId = table
  return table_reference
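# Usage sketch (assumed identifiers): the three calls below produce
# equivalent TableReference objects, and a ready-made bigquery.TableReference
# (or a callable / ValueProvider) is passed through unchanged.
#
#   parse_table_reference('my-project:my_dataset.my_table')
#   parse_table_reference('my_dataset.my_table', project='my-project')
#   parse_table_reference('my_table', dataset='my_dataset',
#                         project='my-project')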
# -----------------------------------------------------------------------------
# BigQueryWrapper.


class BigQueryWrapper(object):
  """BigQuery client wrapper with utilities for querying.

  The wrapper is used to organize all the BigQuery integration points and
  offer a common place where retry logic for failures can be controlled.
  In addition it offers various functions used both in sources and sinks
  (e.g., find and create tables, query a table, etc.).
  """

  TEMP_TABLE = 'temp_table_'
  TEMP_DATASET = 'temp_dataset_'

  def __init__(self, client=None):
    self.client = client or bigquery.BigqueryV2(
        http=get_new_http(),
        credentials=auth.get_service_credentials(),
        response_encoding=None if sys.version_info[0] < 3 else 'utf8')
    self._unique_row_id = 0
    # For testing scenarios where we pass in a client we do not want a
    # randomized prefix for row IDs.
    self._row_id_prefix = '' if client else uuid.uuid4()
    self._temporary_table_suffix = uuid.uuid4().hex

  @property
  def unique_row_id(self):
    """Returns a unique row ID (str) used to avoid multiple insertions.

    If the row ID is provided, BigQuery will make a best effort to not insert
    the same row multiple times for fail and retry scenarios in which the
    insert request may be issued several times. This comes into play for
    sinks executed in a local runner.

    Returns:
      a unique row ID string
    """
    self._unique_row_id += 1
    return '%s_%d' % (self._row_id_prefix, self._unique_row_id)

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table(self, project_id, dataset_id, table_id):
    """Lookup a table's metadata object.

    Args:
      project_id, dataset_id, table_id: table lookup parameters

    Returns:
      bigquery.Table instance

    Raises:
      HttpError: if the lookup failed.
    """
    request = bigquery.BigqueryTablesGetRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    response = self.client.tables.Get(request)
    # The response is a bigquery.Table instance.
    return response
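  # Behavior sketch for unique_row_id (names as defined above): with no client
  # injected, IDs look like '<uuid4>_1', '<uuid4>_2', ...; in tests that pass
  # a client the prefix is empty, so IDs are simply '_1', '_2', ... This keeps
  # insertIds stable enough for BigQuery's best-effort deduplication.
  #
  #   wrapper = BigQueryWrapper()
  #   first, second = wrapper.unique_row_id, wrapper.unique_row_id
  #   assert first != second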
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def create_temporary_dataset(self, project_id, location):
    dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
    # Check if dataset exists to make sure that the temporary id is unique.
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      if project_id is not None:
        # Unittests don't pass projectIds so they can be run without error.
        raise RuntimeError(
            'Dataset %s:%s already exists so cannot be used as temporary.'
            % (project_id, dataset_id))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning(
            'Dataset %s:%s does not exist so we will create it as temporary '
            'with location=%s', project_id, dataset_id, location)
        self.get_or_create_dataset(project_id, dataset_id, location=location)
      else:
        raise
  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_table(
      self, project_id, dataset_id, table_id, schema,
      create_disposition, write_disposition):
    """Gets or creates a table based on create and write dispositions.

    The function mimics the behavior of BigQuery import jobs when using the
    same create and write dispositions.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      schema: A bigquery.TableSchema instance or None.
      create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
      write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.

    Returns:
      A bigquery.Table instance if table was found or created.

    Raises:
      RuntimeError: For various mismatches between the state of the table and
        the create/write dispositions passed in. For example if the table is
        not empty and WRITE_EMPTY was specified then an error will be raised
        since the table was expected to be empty.
    """
    # Imported here to avoid a circular module dependency.
    from apache_beam.io.gcp.bigquery import BigQueryDisposition

    found_table = None
    try:
      found_table = self.get_table(project_id, dataset_id, table_id)
    except HttpError as exn:
      if exn.status_code == 404:
        if create_disposition == BigQueryDisposition.CREATE_NEVER:
          raise RuntimeError(
              'Table %s:%s.%s not found but create disposition is '
              'CREATE_NEVER.' % (project_id, dataset_id, table_id))
      else:
        raise

    # If table exists already then handle the semantics for WRITE_EMPTY and
    # WRITE_TRUNCATE write dispositions.
    if found_table:
      table_empty = self._is_table_empty(project_id, dataset_id, table_id)
      if (not table_empty and
          write_disposition == BigQueryDisposition.WRITE_EMPTY):
        raise RuntimeError(
            'Table %s:%s.%s is not empty but write disposition is '
            'WRITE_EMPTY.' % (project_id, dataset_id, table_id))
      # Delete the table and recreate it (later) if WRITE_TRUNCATE was
      # specified.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        self._delete_table(project_id, dataset_id, table_id)

    # Create a new table potentially reusing the schema from a previously
    # found table in case the schema was not specified.
    if schema is None and found_table is None:
      raise RuntimeError(
          'Table %s:%s.%s requires a schema. None can be inferred because the '
          'table does not exist.' % (project_id, dataset_id, table_id))
    if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
      return found_table
    else:
      created_table = self._create_table(
          project_id=project_id, dataset_id=dataset_id, table_id=table_id,
          schema=schema or found_table.schema)
      logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
                   project_id, dataset_id, table_id,
                   schema or found_table.schema, created_table)
      # If write_disposition == BigQueryDisposition.WRITE_TRUNCATE we deleted
      # the table before this point.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        # BigQuery can route data to the old table for 2 mins max so wait
        # that much time before creating the table and writing it.
        logging.warning('Sleeping for 150 seconds before the write as ' +
                        'BigQuery inserts can be routed to deleted table ' +
                        'for 2 mins after the delete and create.')
        # TODO(BEAM-2673): Remove this sleep by migrating to load api
        time.sleep(150)
        return created_table
      else:
        return created_table
  def insert_rows(self, project_id, dataset_id, table_id, rows,
                  skip_invalid_rows=False):
    """Inserts rows into the specified table.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      rows: A list of plain Python dictionaries. Each dictionary is a row and
        each key in it is the name of a field.
      skip_invalid_rows: If there are rows with insertion errors, whether they
        should be skipped, and all others should be inserted successfully.

    Returns:
      A tuple (bool, errors). If the first element is False then the second
      element will be a bigquery.InsertErrorsValueListEntry instance
      containing specific errors.
    """
    # Prepare rows for insertion. Of special note is the row ID that we add
    # to each row in order to help BigQuery avoid inserting a row multiple
    # times. BigQuery will do a best-effort if unique IDs are provided. This
    # situation can happen during retries on failures.
    # TODO(silviuc): Must add support to writing TableRow's instead of dicts.
    final_rows = []
    for row in rows:
      json_object = bigquery.JsonObject()
      for k, v in iteritems(row):
        if isinstance(v, decimal.Decimal):
          # decimal values are converted into string because JSON does not
          # support the precision that decimal supports. BQ is able to handle
          # inserts into NUMERIC columns by receiving JSON with string attrs.
          v = str(v)
        json_object.additionalProperties.append(
            bigquery.JsonObject.AdditionalProperty(
                key=k, value=to_json_value(v)))
      final_rows.append(
          bigquery.TableDataInsertAllRequest.RowsValueListEntry(
              insertId=str(self.unique_row_id),
              json=json_object))
    result, errors = self._insert_all_rows(
        project_id, dataset_id, table_id, final_rows, skip_invalid_rows)
    return result, errors
write_disposition: WRITE_APPEND, WRITE_EMPTY or", "if WRITE_TRUNCATE was # specified. if write_disposition == BigQueryDisposition.WRITE_TRUNCATE: self._delete_table(project_id,", "= 'RETRY_ALWAYS' RETRY_NEVER = 'RETRY_NEVER' RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR' _NON_TRANSIENT_ERRORS =", "iteritems from apache_beam import coders from apache_beam.internal.gcp import auth from", "isinstance(schema, bigquery.TableFieldSchema): cell = row['f'][index] value = cell['v'] if 'v'", "bigquery.BigqueryV2 instance project_id, dataset_id, table_id: table lookup parameters Returns: bigquery.Table", "in it is the name of a field. skip_invalid_rows: If", "hasattr(source, 'pipeline_options'): self.executing_project = ( source.pipeline_options.view_as(GoogleCloudOptions).project) else: self.executing_project = None", "project id owning the table. dataset_id: The dataset id owning", "datasetId, and tableId. Raises: ValueError: if the table reference as", "the write as ' + 'BigQuery inserts can be routed", "use_legacy_sql self.flatten_results = flatten_results self.kms_key = kms_key if self.source.table_reference is", "info. if not self.executing_project and test_bigquery_client is None: raise RuntimeError(", "raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _is_table_empty(self, project_id, dataset_id, table_id): request", "'%s:%s.%s' % ( destination.projectId, destination.datasetId, destination.tableId) else: return destination def", "the old table for 2 mins max so wait #", "unique row ID (str) used to avoid multiple insertions. If", "returning an empty list result[field.name] = [] else: result[field.name] =", "not None: tr = self.source.table_reference return self.client.get_table_location( tr.projectId if tr.projectId", "\"\"\"Lookup a table's metadata object. Args: client: bigquery.BigqueryV2 instance project_id,", "file distributed with # this work for additional information regarding", "datasetId=dataset_id, tableId=table_id), schema=schema) request = bigquery.BigqueryTablesInsertRequest( projectId=project_id, datasetId=dataset_id, table=table) response", "Figure out the project, dataset, and table used for the", "a schema field object supports also a RECORD type. However", "insert request may be issued several times. This comes into", "is None and hasattr(sink, 'pipeline_options'): self.project_id = ( sink.pipeline_options.view_as(GoogleCloudOptions).project) assert", "argument is not specified. \"\"\" def encode(self, table_row): # The", "_insert_all_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False): \"\"\"Calls the insertAll BigQuery", "sourceTable=from_table_reference, createDisposition=create_disposition, writeDisposition=write_disposition, ) ), jobReference=reference, ) ) logging.info(\"Inserting job", "for additional information regarding copyright ownership. # The ASF licenses", "all others should be inserted successfully. Returns: A tuple (bool,", "else: result[field.name] = [self._convert_cell_value_to_dict(x['v'], field) for x in value] elif", "True return value == 'true' elif field.type == 'INTEGER': #", "just a # table name. 
if dataset is None: match", "return response else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _is_table_empty(self, project_id,", "k, v in iteritems(row): if isinstance(v, decimal.Decimal): # decimal values", "except ImportError: pass # pylint: enable=wrong-import-order, wrong-import-position MAX_RETRIES = 3", "except HttpError as exn: if exn.status_code == 404: logging.warning( 'Dataset", "attrs. v = str(v) json_object.additionalProperties.append( bigquery.JsonObject.AdditionalProperty( key=k, value=to_json_value(v))) final_rows.append( bigquery.TableDataInsertAllRequest.RowsValueListEntry(", "TableReference for the destination, and the value is the record", "additional details. self.schema = None self.use_legacy_sql = use_legacy_sql self.flatten_results =", "= 3 JSON_COMPLIANCE_ERROR = 'NAN, INF and -INF values are", "The normal error when dumping NAN/INF values is: # ValueError:", "Software Foundation (ASF) under one or more # contributor license", "referenced table in the query and depends on the BigQuery", "sink writer for a BigQuerySink.\"\"\" def __init__(self, sink, test_bigquery_client=None, buffer_size=None):", "max_results=10000): request = bigquery.BigqueryJobsGetQueryResultsRequest( jobId=job_id, pageToken=page_token, projectId=project_id, maxResults=max_results) response =", "return not response.insertErrors, response.insertErrors @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table(self, project_id,", "not [] if errors encountered. return not response.insertErrors, response.insertErrors @retry.with_exponential_backoff(", "in the query and depends on the BigQuery service to", "prefix for row IDs. self._row_id_prefix = '' if client else", "support to writing TableRow's instead of dicts. final_rows = []", "\"true\" --> Output: True return value == 'true' elif field.type", "retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter) def _insert_all_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False): \"\"\"Calls the", "decimal.Decimal): # decimal values are converted into string because JSON", "for f in json_schema['fields']] return bigquery.TableSchema(fields=fields) def parse_table_reference(table, dataset=None, project=None):", "location=%s', project_id, dataset_id, location) self.get_or_create_dataset(project_id, dataset_id, location=location) else: raise @retry.with_exponential_backoff(", "the service. If # the request times out we keep", "partitioned tables, appending '$YYYYmmdd' to the table name is supported,", "id owning the table. table_id: The table id. rows: A", "write as ' + 'BigQuery inserts can be routed to", "The table id. rows: A list of plain Python dictionaries.", "field.type == 'BOOLEAN': # Input: \"true\" --> Output: True return", "1478134176.985864 --> Output: \"2016-11-03 00:49:36.985864 UTC\" dt = datetime.datetime.utcfromtimestamp(float(value)) return", "__init__(self, source, test_bigquery_client=None, use_legacy_sql=True, flatten_results=True, kms_key=None): self.source = source self.test_bigquery_client", "a list of # bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as # required by", "has the following attributes: projectId, datasetId, and tableId. Raises: ValueError:", "place where retry logic for failures can be controlled. 

def parse_table_schema_from_json(schema_string):
  """Parse the Table Schema provided as string.

  Args:
    schema_string: String serialized table schema, should be a valid JSON.

  Returns:
    A TableSchema of the BigQuery export from either the Query or the Table.
  """
  json_schema = json.loads(schema_string)

  def _parse_schema_field(field):
    """Parse a single schema field from dictionary.

    Args:
      field: Dictionary object containing serialized schema.

    Returns:
      A TableFieldSchema for a single column in BigQuery.
    """
    schema = bigquery.TableFieldSchema()
    schema.name = field['name']
    schema.type = field['type']
    if 'mode' in field:
      schema.mode = field['mode']
    else:
      schema.mode = 'NULLABLE'
    if 'description' in field:
      schema.description = field['description']
    if 'fields' in field:
      schema.fields = [_parse_schema_field(x) for x in field['fields']]
    return schema

  fields = [_parse_schema_field(f) for f in json_schema['fields']]
  return bigquery.TableSchema(fields=fields)
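
# Illustrative sketch (not part of the original module): parsing a serialized
# schema; the field names below are made up.
def _example_parse_table_schema_from_json():  # pragma: no cover
  schema = parse_table_schema_from_json(
      json.dumps({'fields': [
          {'name': 'words', 'type': 'STRING', 'mode': 'NULLABLE'},
          {'name': 'count', 'type': 'INTEGER'}]}))
  # Fields without an explicit mode default to 'NULLABLE'.
  assert schema.fields[1].mode == 'NULLABLE'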

def parse_table_reference(table, dataset=None, project=None):
  """Parses a table reference into a (project, dataset, table) tuple.

  Args:
    table: The ID of the table. The ID must contain only letters (a-z, A-Z),
      numbers (0-9), or underscores (_). If dataset argument is None then the
      table argument must contain the entire table reference: 'DATASET.TABLE'
      or 'PROJECT:DATASET.TABLE'. This argument can be a
      bigquery.TableReference instance in which case dataset and project are
      ignored and the reference is returned as a result. Additionally, for
      date partitioned tables, appending '$YYYYmmdd' to the table name is
      supported, e.g. 'DATASET.TABLE$YYYYmmdd'.
    dataset: The ID of the dataset containing this table or null if the table
      reference is specified entirely by the table argument.
    project: The ID of the project containing this table or null if the table
      reference is specified entirely by the table (and possibly dataset)
      argument.

  Returns:
    A TableReference object from the bigquery API. The object has the
    following attributes: projectId, datasetId, and tableId.

  Raises:
    ValueError: if the table reference as a string does not match the
      expected format.
  """
  if isinstance(table, bigquery.TableReference):
    return table
  elif isinstance(table, value_provider.ValueProvider):
    return table

  table_reference = bigquery.TableReference()
  # If dataset argument is not specified, the expectation is that the
  # table argument will contain a full table reference instead of just a
  # table name.
  if dataset is None:
    match = re.match(
        r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
    if not match:
      raise ValueError(
          'Expected a table reference (PROJECT:DATASET.TABLE or '
          'DATASET.TABLE) instead of %s.' % table)
    table_reference.projectId = match.group('project')
    table_reference.datasetId = match.group('dataset')
    table_reference.tableId = match.group('table')
  else:
    table_reference.projectId = project
    table_reference.datasetId = dataset
    table_reference.tableId = table
  return table_reference
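
# Illustrative sketch (not part of the original module): the accepted string
# forms. Names are made up; the '$YYYYmmdd' suffix targets a date partition.
def _example_parse_table_reference():  # pragma: no cover
  ref = parse_table_reference('myproject:mydataset.mytable')
  assert (ref.projectId, ref.datasetId, ref.tableId) == (
      'myproject', 'mydataset', 'mytable')
  # Without the 'PROJECT:' prefix, projectId stays None.
  assert parse_table_reference('mydataset.mytable').projectId is None
  # A date-partition decorator is accepted as part of the table id.
  assert parse_table_reference(
      'mydataset.mytable$20161103').tableId == 'mytable$20161103'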

# -----------------------------------------------------------------------------
# BigQueryWrapper.


class BigQueryWrapper(object):
  """BigQuery client wrapper with utilities for querying.

  The wrapper is used to organize all the BigQuery integration points and
  offer a common place where retry logic for failures can be controlled.
  In addition it offers various functions used both in sources and sinks
  (e.g., find and create tables, query a table, etc.).
  """

  TEMP_TABLE = 'temp_table_'
  TEMP_DATASET = 'temp_dataset_'

  def __init__(self, client=None):
    self.client = client or bigquery.BigqueryV2(
        http=get_new_http(),
        credentials=auth.get_service_credentials(),
        response_encoding=None if sys.version_info[0] < 3 else 'utf8')
    self._unique_row_id = 0
    # For testing scenarios where we pass in a client we do not want a
    # randomized prefix for row IDs.
    self._row_id_prefix = '' if client else uuid.uuid4().hex
    self._temporary_table_suffix = uuid.uuid4().hex

  @property
  def unique_row_id(self):
    """Returns a unique row ID (str) used to avoid multiple insertions.

    If the row ID is provided, BigQuery will make a best effort to not insert
    the same row multiple times for fail and retry scenarios in which the
    insert request may be issued several times. This comes into play for
    sinks executed in a local runner.

    Returns:
      a unique row ID string
    """
    self._unique_row_id += 1
    return '%s_%d' % (self._row_id_prefix, self._unique_row_id)

  def _get_temp_table(self, project_id):
    return parse_table_reference(
        table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
        dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
        project=project_id)
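
  # Illustrative sketch (comments only; not from the original module): the
  # insertId-based deduplication that unique_row_id enables, with a made-up
  # wrapper instance:
  #
  #   wrapper = BigQueryWrapper()
  #   first, second = wrapper.unique_row_id, wrapper.unique_row_id
  #   # Both share the per-wrapper random prefix but carry an increasing
  #   # counter, e.g. 'abc123_1' and 'abc123_2'. A retried insertAll request
  #   # resends the same insertId, so BigQuery can drop the duplicates.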

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_query_location(self, project_id, query, use_legacy_sql):
    """Get the location of tables referenced in a query.

    This method returns the location of the first referenced table in the
    query and depends on the BigQuery service to provide error handling for
    queries that reference tables in multiple locations.
    """
    reference = bigquery.JobReference(
        jobId=uuid.uuid4().hex, projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                dryRun=True,
                query=bigquery.JobConfigurationQuery(
                    query=query,
                    useLegacySql=use_legacy_sql,
                )),
            jobReference=reference))

    response = self.client.jobs.Insert(request)

    if response.statistics is None:
      # This behavior is only expected in tests
      logging.warning(
          "Unable to get location, missing response.statistics. Query: %s",
          query)
      return None

    referenced_tables = response.statistics.query.referencedTables
    if referenced_tables:  # Guards against both non-empty and non-None
      table = referenced_tables[0]
      location = self.get_table_location(
          table.projectId, table.datasetId, table.tableId)
      logging.info("Using location %r from table %r referenced by query %s",
                   location, table, query)
      return location

    logging.debug("Query %s does not reference any tables.", query)
    return None

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _insert_copy_job(self, project_id, job_id, from_table_reference,
                       to_table_reference, create_disposition=None,
                       write_disposition=None):
    reference = bigquery.JobReference()
    reference.jobId = job_id
    reference.projectId = project_id
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                copy=bigquery.JobConfigurationTableCopy(
                    destinationTable=to_table_reference,
                    sourceTable=from_table_reference,
                    createDisposition=create_disposition,
                    writeDisposition=write_disposition,
                )
            ),
            jobReference=reference,
        )
    )

    logging.info("Inserting job request: %s", request)
    response = self.client.jobs.Insert(request)
    logging.info("Response was %s", response)
    return response.jobReference

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _insert_load_job(self, project_id, job_id, table_reference, source_uris,
                       schema=None, write_disposition=None,
                       create_disposition=None):
    reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                load=bigquery.JobConfigurationLoad(
                    sourceUris=source_uris,
                    destinationTable=table_reference,
                    schema=schema,
                    writeDisposition=write_disposition,
                    createDisposition=create_disposition,
                    sourceFormat='NEWLINE_DELIMITED_JSON',
                )
            ),
            jobReference=reference,
        ))
    response = self.client.jobs.Insert(request)
    return response.jobReference

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _start_query_job(self, project_id, query, use_legacy_sql,
                       flatten_results, job_id, dry_run=False):
    reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                dryRun=dry_run,
                query=bigquery.JobConfigurationQuery(
                    query=query,
                    useLegacySql=use_legacy_sql,
                    allowLargeResults=True,
                    destinationTable=self._get_temp_table(project_id),
                    flattenResults=flatten_results)),
            jobReference=reference))

    response = self.client.jobs.Insert(request)
    return response.jobReference.jobId

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _get_query_results(self, project_id, job_id, page_token=None,
                         max_results=10000):
    request = bigquery.BigqueryJobsGetQueryResultsRequest(
        jobId=job_id, pageToken=page_token, projectId=project_id,
        maxResults=max_results)
    response = self.client.jobs.GetQueryResults(request)
    return response

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_job(self, project, job_id, location=None):
    request = bigquery.BigqueryJobsGetRequest()
    request.jobId = job_id
    request.projectId = project
    request.location = location

    return self.client.jobs.Get(request)

  def perform_load_job(self, destination, files, job_id, schema=None,
                       write_disposition=None, create_disposition=None):
    """Starts a job to load data into BigQuery.

    Returns:
      bigquery.JobReference with the information about the job that was
      started.
    """
    return self._insert_load_job(
        destination.projectId, job_id, destination, files,
        schema=schema,
        create_disposition=create_disposition,
        write_disposition=write_disposition)
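
  # Illustrative sketch (comments only; not from the original module): the
  # dry-run probe above means get_query_location never executes the query.
  # With a made-up wrapper:
  #
  #   wrapper = BigQueryWrapper()
  #   location = wrapper.get_query_location(
  #       'myproject', 'SELECT 1 FROM mydataset.mytable',
  #       use_legacy_sql=False)
  #   # Returns e.g. 'EU', the location of the first referenced table.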
\"\"\" TEMP_TABLE =", "project_id, dataset_id) return else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table_location(self,", "previously # found table in case the schema was not", "are flattened # unless we pass the flatten_results flag as", "BigQueryDisposition.WRITE_EMPTY): raise RuntimeError( 'Table %s:%s.%s is not empty but write", "# BigQueryWrapper. class BigQueryWrapper(object): \"\"\"BigQuery client wrapper with utilities for", "each row in order to help BigQuery avoid inserting a", "return self.convert_row_to_dict(value, field) elif field.type == 'NUMERIC': return decimal.Decimal(value) elif", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_or_create_table( self, project_id, dataset_id, table_id, schema,", "retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results, job_id, dry_run=False): reference", "tableId. Raises: ValueError: if the table reference as a string", "def _get_table_fn(destination): if callable(destination): return destination else: return lambda x:", "this file are experimental and have no backwards compatibility guarantees.", "is not None: # If table schema did not define", "value else: raise RuntimeError('Unexpected field type: %s' % field.type) def", "return created_table def run_query(self, project_id, query, use_legacy_sql, flatten_results, dry_run=False): job_id", "field.name) result[field.name] = None else: result[field.name] = self._convert_cell_value_to_dict(value, field) return", "it as temporary ' 'with location=%s', project_id, dataset_id, location) self.get_or_create_dataset(project_id,", "and this method will need to be updated as well.", "Table. \"\"\" json_schema = json.loads(schema_string) def _parse_schema_field(field): \"\"\"Parse a single", "# distributed under the License is distributed on an \"AS", "class BigQueryWrapper(object): \"\"\"BigQuery client wrapper with utilities for querying. The", "need to be updated as well. raise ValueError(\"BigQuerySource must have", "dataset is None: match = re.match( r'^((?P<project>.+):)?(?P<dataset>\\w+)\\.(?P<table>[\\w\\$]+)$', table) if not", "\"Object of type '%s' is not JSON serializable\" % type(obj).__name__)", "= bigquery.TableFieldSchema() schema.name = field['name'] schema.type = field['type'] if 'mode'", "# Unless required by applicable law or agreed to in", "situation # can happen during retries on failures. 
# TODO(silviuc):", "job_id reference.projectId = project_id request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration(", "UTC') elif field.type == 'BYTES': # Input: \"YmJi\" --> Output:", "get_or_create_table( self, project_id, dataset_id, table_id, schema, create_disposition, write_disposition): \"\"\"Gets or", "field['type'] if 'mode' in field: schema.mode = field['mode'] else: schema.mode", "the table argument must contain the entire table reference: 'DATASET.TABLE'", "row in order to help BigQuery avoid inserting a row", "option to specify it.') self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder) # Schema", "A TableSchema of the BigQuery export from either the Query", "do not want a # randomized prefix for row IDs.", "return response.jobReference @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _start_query_job(self, project_id, query, use_legacy_sql,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "a table, etc.). \"\"\" TEMP_TABLE = 'temp_table_' TEMP_DATASET = 'temp_dataset_'", "table argument must contain the entire table reference: 'DATASET.TABLE' or", "create it try: dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest( projectId=project_id, datasetId=dataset_id)) return dataset", "a table based on create and write dispositions. The function", "WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE. Returns: A bigquery.Table instance if table", "00:49:36.985864 UTC\" dt = datetime.datetime.utcfromtimestamp(float(value)) return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC') elif", "# Guards against both non-empty and non-None table = referenced_tables[0]", "the BigQuery export from either the Query or the Table.", "not None else self.executing_project, tr.datasetId, tr.tableId) else: # It's a", "BigQuery export from either the Query or the Table. \"\"\"", "is not None: tr = self.source.table_reference return self.client.get_table_location( tr.projectId if", "query. This method returns the location of the first referenced", "request = bigquery.BigqueryTablesInsertRequest( projectId=project_id, datasetId=dataset_id, table=table) response = self.client.tables.Insert(request) logging.debug(\"Created", "is CREATE_NEVER.' % (project_id, dataset_id, table_id)) else: raise # If", "got some results. The last page is signalled by a", "signalled by a missing pageToken. yield response.rows, response.schema if not", "Result: %s.', project_id, dataset_id, table_id, schema or found_table.schema, created_table) #", "in rows: json_object = bigquery.JsonObject() for k, v in iteritems(row):", "write dispositions. The function mimics the behavior of BigQuery import", "# table name. if dataset is None: match = re.match(", "of the table and the create/write dispositions passed in. For", "include wrappers and clients to interact with BigQuery APIs. NOTHING", "bigquery.BigqueryJobsGetQueryResultsRequest( jobId=job_id, pageToken=page_token, projectId=project_id, maxResults=max_results) response = self.client.jobs.GetQueryResults(request) return response", "response = self.client.tabledata.List(request) # The response is a bigquery.TableDataList instance.", "is not specified, the expectation is that the # table", "' table [%s:%s.%s]. Errors: %s' % (self.project_id, self.dataset_id, self.table_id, errors))", "-INF values are not JSON compliant.' 
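
  # Illustrative sketch (comments only; not from the original module):
  # NUMERIC values ride along as strings, per the decimal handling above.
  #
  #   passed, errors = wrapper.insert_rows(
  #       'myproject', 'mydataset', 'mytable',
  #       [{'words': 'abc', 'price': decimal.Decimal('1.50')}])
  #   # 'price' is serialized as the JSON string "1.50"; BigQuery coerces it
  #   # back into the NUMERIC column.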

  def _convert_cell_value_to_dict(self, value, field):
    if field.type == 'STRING':
      # Input: "XYZ" --> Output: "XYZ"
      return value
    elif field.type == 'BOOLEAN':
      # Input: "true" --> Output: True
      return value == 'true'
    elif field.type == 'INTEGER':
      # Input: "123" --> Output: 123
      return int(value)
    elif field.type == 'FLOAT':
      # Input: "1.23" --> Output: 1.23
      return float(value)
    elif field.type == 'TIMESTAMP':
      # The UTC should come from the timezone library but this is a known
      # issue, so it is hardcoded here.
      # Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
      dt = datetime.datetime.utcfromtimestamp(float(value))
      return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
    elif field.type == 'BYTES':
      # Input: "YmJi" --> Output: "YmJi"
      return value
    elif field.type == 'DATE':
      # Input: "2016-11-03" --> Output: "2016-11-03"
      return value
    elif field.type == 'DATETIME':
      # Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
      return value
    elif field.type == 'TIME':
      # Input: "00:49:36" --> Output: "00:49:36"
      return value
    elif field.type == 'RECORD':
      # Note that a schema field object supports also a RECORD type. However
      # when querying, the repeated and/or record fields are flattened
      # unless we pass the flatten_results flag as False to the source
      return self.convert_row_to_dict(value, field)
    elif field.type == 'NUMERIC':
      return decimal.Decimal(value)
    elif field.type == 'GEOGRAPHY':
      return value
    else:
      raise RuntimeError('Unexpected field type: %s' % field.type)

  def convert_row_to_dict(self, row, schema):
    """Converts a TableRow instance using the schema to a Python dict."""
    result = {}
    for index, field in enumerate(schema.fields):
      value = None
      if isinstance(schema, bigquery.TableSchema):
        cell = row.f[index]
        value = from_json_value(cell.v) if cell.v is not None else None
      elif isinstance(schema, bigquery.TableFieldSchema):
        cell = row['f'][index]
        value = cell['v'] if 'v' in cell else None
      if field.mode == 'REPEATED':
        if value is None:
          # Ideally this should never happen as repeated fields default to
          # returning an empty list
          result[field.name] = []
        else:
          result[field.name] = [
              self._convert_cell_value_to_dict(x['v'], field) for x in value]
      elif value is None:
        if not field.mode == 'NULLABLE':
          raise ValueError('Received \'None\' as the value for the field %s '
                           'but the field is not NULLABLE.' % field.name)
        result[field.name] = None
      else:
        result[field.name] = self._convert_cell_value_to_dict(value, field)
    return result
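
  # Illustrative sketch (comments only; not from the original module): the
  # TIMESTAMP branch above formats epoch seconds with a hardcoded UTC label:
  #
  #   datetime.datetime.utcfromtimestamp(1478134176.985864)
  #   # --> datetime(2016, 11, 3, 0, 49, 36, 985864), rendered as
  #   # '2016-11-03 00:49:36.985864 UTC'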

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table(self, project_id, dataset_id, table_id):
    """Lookup a table's metadata object.

    Args:
      project_id, dataset_id, table_id: table lookup parameters

    Returns:
      bigquery.Table instance
    Raises:
      HttpError if lookup failed.
    """
    request = bigquery.BigqueryTablesGetRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    response = self.client.tables.Get(request)
    return response

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table_location(self, project_id, dataset_id, table_id):
    table = self.get_table(project_id, dataset_id, table_id)
    return table.location

  def _create_table(self, project_id, dataset_id, table_id, schema):
    table = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId=project_id, datasetId=dataset_id, tableId=table_id),
        schema=schema)
    request = bigquery.BigqueryTablesInsertRequest(
        projectId=project_id, datasetId=dataset_id, table=table)
    response = self.client.tables.Insert(request)
    logging.debug("Created the table with id %s", table_id)
    # The response is a bigquery.Table instance.
    return response

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_dataset(self, project_id, dataset_id, location=None):
    # Check if dataset already exists otherwise create it
    try:
      dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      return dataset
    except HttpError as exn:
      if exn.status_code == 404:
        dataset_reference = bigquery.DatasetReference(
            projectId=project_id, datasetId=dataset_id)
        dataset = bigquery.Dataset(datasetReference=dataset_reference)
        if location is not None:
          dataset.location = location
        request = bigquery.BigqueryDatasetsInsertRequest(
            projectId=project_id, dataset=dataset)
        response = self.client.datasets.Insert(request)
        # The response is a bigquery.Dataset instance.
        return response
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _is_table_empty(self, project_id, dataset_id, table_id):
    request = bigquery.BigqueryTabledataListRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id,
        maxResults=1)
    response = self.client.tabledata.List(request)
    # The response is a bigquery.TableDataList instance.
    return response.totalRows == 0

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _delete_table(self, project_id, dataset_id, table_id):
    request = bigquery.BigqueryTablesDeleteRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    try:
      self.client.tables.Delete(request)
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Table %s:%s.%s does not exist', project_id,
                        dataset_id, table_id)
        return
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
    request = bigquery.BigqueryDatasetsDeleteRequest(
        projectId=project_id, datasetId=dataset_id,
        deleteContents=delete_contents)
    try:
      self.client.datasets.Delete(request)
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Dataset %s:%s does not exist', project_id,
                        dataset_id)
        return
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def create_temporary_dataset(self, project_id, location):
    dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
    # Check if dataset exists to make sure that the temporary id is unique
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      if project_id is not None:
        # Unittests don't pass projectIds so they can be run without error
        raise RuntimeError(
            'Dataset %s:%s already exists so cannot be used as temporary.'
            % (project_id, dataset_id))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning(
            'Dataset %s:%s does not exist so we will create it as temporary '
            'with location=%s', project_id, dataset_id, location)
        self.get_or_create_dataset(project_id, dataset_id, location=location)
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def clean_up_temporary_dataset(self, project_id):
    temp_table = self._get_temp_table(project_id)
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=temp_table.datasetId))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Dataset %s:%s does not exist', project_id,
                        temp_table.datasetId)
        return
      else:
        raise
    self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_table(self, project_id, dataset_id, table_id, schema,
                          create_disposition, write_disposition):
    """Gets or creates a table based on create and write dispositions.

    The function mimics the behavior of BigQuery import jobs when using the
    same create and write dispositions.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      schema: A bigquery.TableSchema instance or None.
      create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
      write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.

    Returns:
      A bigquery.Table instance if table was found or created.

    Raises:
      RuntimeError: For various mismatches between the state of the table
        and the create/write dispositions passed in. For example if the
        table is not empty and WRITE_EMPTY was specified then an error will
        be raised since the table was expected to be empty.
    """
    # NOTE: BigQueryDisposition is defined in apache_beam.io.gcp.bigquery;
    # its exact import is not recoverable from the fragments, so a lazy
    # import is assumed here to avoid a circular module dependency.
    from apache_beam.io.gcp.bigquery import BigQueryDisposition

    found_table = None
    try:
      found_table = self.get_table(project_id, dataset_id, table_id)
    except HttpError as exn:
      if exn.status_code == 404:
        if create_disposition == BigQueryDisposition.CREATE_NEVER:
          raise RuntimeError(
              'Table %s:%s.%s not found but create disposition is '
              'CREATE_NEVER.' % (project_id, dataset_id, table_id))
      else:
        raise

    # If table exists already then handle the semantics for WRITE_EMPTY and
    # WRITE_TRUNCATE write dispositions.
    if found_table:
      table_empty = self._is_table_empty(project_id, dataset_id, table_id)
      if (not table_empty and
          write_disposition == BigQueryDisposition.WRITE_EMPTY):
        raise RuntimeError(
            'Table %s:%s.%s is not empty but write disposition is '
            'WRITE_EMPTY.' % (project_id, dataset_id, table_id))
      # Delete the table and recreate it (later) if WRITE_TRUNCATE was
      # specified.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        self._delete_table(project_id, dataset_id, table_id)

    # Create a new table potentially reusing the schema from a previously
    # found table in case the schema was not specified.
    if schema is None and found_table is None:
      raise RuntimeError(
          'Table %s:%s.%s requires a schema. None can be inferred because '
          'the table does not exist.' % (project_id, dataset_id, table_id))
    if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
      return found_table
    else:
      created_table = self._create_table(
          project_id=project_id, dataset_id=dataset_id, table_id=table_id,
          schema=schema or found_table.schema)
      logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
                   project_id, dataset_id, table_id,
                   schema or found_table.schema, created_table)
      # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE the table
      # was deleted above and needs to be recreated safely.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        # BigQuery can route data to the old table for 2 mins max so wait
        # that much time before creating the table and writing it
        logging.warning('Sleeping for 150 seconds before the write as ' +
                        'BigQuery inserts can be routed to deleted table ' +
                        'for 2 mins after the delete and create.')
        # TODO(BEAM-2673): Remove this sleep by migrating to load api
        time.sleep(150)
        return created_table
      else:
        return created_table

  def run_query(self, project_id, query, use_legacy_sql, flatten_results,
                dry_run=False):
    job_id = self._start_query_job(project_id, query, use_legacy_sql,
                                   flatten_results, job_id=uuid.uuid4().hex,
                                   dry_run=dry_run)
    if dry_run:
      # If this was a dry run then the fact that we get here means the
      # query has no errors. The start_query_job would raise an error
      # otherwise.
      return
    page_token = None
    while True:
      response = self._get_query_results(project_id, job_id, page_token)
      if not response.jobComplete:
        # The jobComplete field can be False if the query request times out
        # (default is 10 seconds). Note that this is a timeout for the query
        # request not for the actual execution of the query in the service.
        # If the request times out we keep trying. This situation is quite
        # possible if the query will return a large number of rows.
        logging.info('Waiting on response from query: %s ...', query)
        time.sleep(1.0)
        continue
      # We got some results. The last page is signalled by a missing
      # pageToken.
      yield response.rows, response.schema
      if not response.pageToken:
        break
      page_token = response.pageToken
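
# Illustrative sketch (not part of the original module): how the disposition
# matrix in get_or_create_table plays out. The project/dataset/table names
# are made up, and a real BigQuery service would be contacted.
def _example_get_or_create_table():  # pragma: no cover
  from apache_beam.io.gcp.bigquery import BigQueryDisposition
  wrapper = BigQueryWrapper()
  # Raises RuntimeError if the table exists and is non-empty, since
  # WRITE_EMPTY promises an empty table; with CREATE_NEVER a missing table
  # also raises instead of being created.
  wrapper.get_or_create_table(
      'myproject', 'mydataset', 'mytable', schema=None,
      create_disposition=BigQueryDisposition.CREATE_NEVER,
      write_disposition=BigQueryDisposition.WRITE_EMPTY)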

# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.


class BigQueryReader(dataflow_io.NativeSourceReader):
  """A reader for a BigQuery source."""

  def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
               flatten_results=True, kms_key=None):
    self.source = source
    self.test_bigquery_client = test_bigquery_client
    if auth.is_running_in_gce:
      self.executing_project = auth.executing_project
    elif hasattr(source, 'pipeline_options'):
      self.executing_project = (
          source.pipeline_options.view_as(GoogleCloudOptions).project)
    else:
      self.executing_project = None

    # TODO(silviuc): Try to automatically get it from gcloud config info.
    if not self.executing_project and test_bigquery_client is None:
      raise RuntimeError(
          'Missing executing project information. Please use the --project '
          'command line option to specify it.')
    self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
    # Schema for the rows being read by the reader. It is initialized the
    # first time something gets read from the table. It is not required
    # for reading the field values in each row but could be useful for
    # getting additional details.
    self.schema = None
    self.use_legacy_sql = use_legacy_sql
    self.flatten_results = flatten_results
    self.kms_key = kms_key

    if self.source.table_reference is not None:
      # If table schema did not define a project we default to executing
      # project.
      project_id = self.source.table_reference.projectId
      if not project_id:
        project_id = self.executing_project
      self.query = 'SELECT * FROM [%s:%s.%s];' % (
          project_id,
          self.source.table_reference.datasetId,
          self.source.table_reference.tableId)
    elif self.source.query is not None:
      self.query = self.source.query
    else:
      # Enforce the "modes" enforced by BigQuerySource.__init__.
      # If this exception has been raised, the BigQuerySource "modes" have
      # changed and this method will need to be updated as well.
      raise ValueError("BigQuerySource must have either a table or query")

  def _get_source_location(self):
    """
    Get the source location (e.g. ``"EU"`` or ``"US"``) from either

    - :data:`source.table_reference`
      or
    - The first referenced table in :data:`source.query`

    See Also:
      - :meth:`BigQueryWrapper.get_query_location`
      - :meth:`BigQueryWrapper.get_table_location`

    Returns:
      Optional[str]: The source location, if any.
    """
    if self.source.table_reference is not None:
      tr = self.source.table_reference
      return self.client.get_table_location(
          tr.projectId if tr.projectId is not None
          else self.executing_project,
          tr.datasetId, tr.tableId)
    else:  # It's a query source
      return self.client.get_query_location(
          self.executing_project,
          self.source.query,
          self.source.use_legacy_sql)

  def __enter__(self):
    self.client = BigQueryWrapper(client=self.test_bigquery_client)
    self.client.create_temporary_dataset(
        self.executing_project, location=self._get_source_location())
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self.client.clean_up_temporary_dataset(self.executing_project)

  def __iter__(self):
    for rows, schema in self.client.run_query(
        project_id=self.executing_project, query=self.query,
        use_legacy_sql=self.use_legacy_sql,
        flatten_results=self.flatten_results):
      if self.schema is None:
        self.schema = schema
      for row in rows:
        if self.row_as_dict:
          yield self.client.convert_row_to_dict(row, schema)
        else:
          yield row

class BigQueryWriter(dataflow_io.NativeSinkWriter):
  """The sink writer for a BigQuerySink."""

  def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
    self.sink = sink
    self.test_bigquery_client = test_bigquery_client
    self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
    # Buffer used to batch written rows so we reduce communication with the
    # BigQuery service.
    self.rows_buffer = []
    self.rows_buffer_flush_threshold = buffer_size or 1000
    # Figure out the project, dataset, and table used for the sink.
    self.project_id = self.sink.table_reference.projectId

    # If table schema did not define a project we default to executing
    # project.
    if self.project_id is None and hasattr(sink, 'pipeline_options'):
      self.project_id = (
          sink.pipeline_options.view_as(GoogleCloudOptions).project)

    assert self.project_id is not None

    self.dataset_id = self.sink.table_reference.datasetId
    self.table_id = self.sink.table_reference.tableId

  def _flush_rows_buffer(self):
    if self.rows_buffer:
      logging.info('Writing %d rows to %s:%s.%s table.',
                   len(self.rows_buffer), self.project_id, self.dataset_id,
                   self.table_id)
      passed, errors = self.client.insert_rows(
          project_id=self.project_id, dataset_id=self.dataset_id,
          table_id=self.table_id, rows=self.rows_buffer)
      self.rows_buffer = []
      if not passed:
        raise RuntimeError('Could not successfully insert rows to BigQuery'
                           ' table [%s:%s.%s]. Errors: %s' %
                           (self.project_id, self.dataset_id,
                            self.table_id, errors))

  def __enter__(self):
    self.client = BigQueryWrapper(client=self.test_bigquery_client)
    self.client.get_or_create_table(
        self.project_id, self.dataset_id, self.table_id,
        self.sink.table_schema, self.sink.create_disposition,
        self.sink.write_disposition)
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self._flush_rows_buffer()

  def Write(self, row):
    self.rows_buffer.append(row)
    if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
      self._flush_rows_buffer()
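
# Illustrative sketch (not part of the original module): the writer only
# talks to the service every `buffer_size` rows. `sink` and `client` below
# are made-up test doubles.
def _example_buffered_writes(sink, client):  # pragma: no cover
  with BigQueryWriter(sink, test_bigquery_client=client,
                      buffer_size=2) as writer:
    writer.Write({'words': 'a'})  # buffered
    writer.Write({'words': 'b'})  # buffered
    writer.Write({'words': 'c'})  # threshold exceeded: one insert_rows call
  # __exit__ flushes whatever is left in the buffer.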

class RowAsDictJsonCoder(coders.Coder):
  """A coder for a table row (represented as a dict) to/from a JSON string.

  This is the default coder for sources and sinks if the coder argument is
  not specified.
  """

  def encode(self, table_row):
    # The normal error when dumping NAN/INF values is:
    # ValueError: Out of range float values are not JSON compliant
    # This code will catch this error to emit an error that explains
    # to the programmer that they have used NAN/INF values.
    try:
      return json.dumps(
          table_row, allow_nan=False, default=default_encoder).encode('utf-8')
    except ValueError as e:
      raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))

  def decode(self, encoded_table_row):
    return json.loads(encoded_table_row.decode('utf-8'))


class RetryStrategy(object):
  RETRY_ALWAYS = 'RETRY_ALWAYS'
  RETRY_NEVER = 'RETRY_NEVER'
  RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR'

  _NON_TRANSIENT_ERRORS = {'invalid', 'invalidQuery', 'notImplemented'}

  @staticmethod
  def should_retry(strategy, error_message):
    if strategy == RetryStrategy.RETRY_ALWAYS:
      return True
    elif strategy == RetryStrategy.RETRY_NEVER:
      return False
    elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and
          error_message not in RetryStrategy._NON_TRANSIENT_ERRORS):
      return True
    else:
      return False
if self.project_id is None and hasattr(sink, 'pipeline_options'): self.project_id", "ID string \"\"\" self._unique_row_id += 1 return '%s_%d' % (self._row_id_prefix,", "' + 'for 2 mins after the delete and create.')", "time import uuid from builtins import object from future.utils import", "iobase as dataflow_io from apache_beam.transforms import DoFn from apache_beam.utils import", "+= 1 return '%s_%d' % (self._row_id_prefix, self._unique_row_id) def _get_temp_table(self, project_id):", "with the # BigQuery service. self.rows_buffer = [] self.rows_buffer_flush_threshold =", "the specified table. Args: project_id: The project id owning the", "to avoid multiple insertions. If the row ID is provided,", "write_disposition == BigQueryDisposition.WRITE_TRUNCATE: self._delete_table(project_id, dataset_id, table_id) # Create a new", "404: logging.warning( 'Dataset %s:%s does not exist so we will", "result # ----------------------------------------------------------------------------- # BigQueryReader, BigQueryWriter. class BigQueryReader(dataflow_io.NativeSourceReader): \"\"\"A reader", "bigquery.TableReference): return '%s:%s.%s' % ( destination.projectId, destination.datasetId, destination.tableId) else: return", "return None referenced_tables = response.statistics.query.referencedTables if referenced_tables: # Guards against", "(the \"License\"); you may not use this file except in", "entire table reference: 'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be", "The project id owning the table. dataset_id: The dataset id", "so we reduce communication with the # BigQuery service. self.rows_buffer", "reference is returned as a result. Additionally, for date partitioned", "destination to an element, making it a KV pair. Outputs", "table reference as a string does not match the expected", "TypeError( \"Object of type '%s' is not JSON serializable\" %", "--> Output: \"2016-11-03\" return value elif field.type == 'DATETIME': #", "is WRITE_EMPTY.' % (project_id, dataset_id, table_id)) # Delete the table", "argument. project: The ID of the project containing this table", "%s:%s does not exist', project_id, temp_table.datasetId) return else: raise self._delete_dataset(temp_table.projectId,", "\"\"\"Inserts rows into the specified table. Args: project_id: The project", "details. self.schema = None self.use_legacy_sql = use_legacy_sql self.flatten_results = flatten_results", "return response @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter) def _insert_all_rows(self, project_id, dataset_id, table_id,", "which the insert request may be issued several times. This", "source, test_bigquery_client=None, use_legacy_sql=True, flatten_results=True, kms_key=None): self.source = source self.test_bigquery_client =", "test_bigquery_client self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder) # Buffer used to batch", "# project. project_id = self.source.table_reference.projectId if not project_id: project_id =", "== 404: logging.warning('Dataset %s:%s does not exist', project_id, dataset_id) return", "CREATE_IF_NEEDED. write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE. Returns: A bigquery.Table instance", "dataset_id)) except HttpError as exn: if exn.status_code == 404: logging.warning(", "tableId. Or a string representing the destination containing 'PROJECT:DATASET.TABLE'. Returns:", "as temporary.' 
% (project_id, dataset_id)) except HttpError as exn: if", "= bigquery.BigqueryTablesDeleteRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id) try: self.client.tables.Delete(request) except HttpError as", "communication with the # BigQuery service. self.rows_buffer = [] self.rows_buffer_flush_threshold", "BigQuery' ' table [%s:%s.%s]. Errors: %s' % (self.project_id, self.dataset_id, self.table_id,", "self._temporary_table_suffix, project=project_id) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_query_location(self, project_id, query, use_legacy_sql):", "happen as repeated fields default to # returning an empty", "Input: \"true\" --> Output: True return value == 'true' elif", "Schema provided as string. Args: schema_string: String serialized table schema,", "project information. Please use the --project ' 'command line option", "self._insert_all_rows( project_id, dataset_id, table_id, final_rows, skip_invalid_rows) return result, errors def", "ValueError('Received \\'None\\' as the value for the field %s '", "= isinstance(self.sink.coder, RowAsDictJsonCoder) # Buffer used to batch written rows", "def perform_load_job(self, destination, files, job_id, schema=None, write_disposition=None, create_disposition=None): \"\"\"Starts a", "representing the destination containing 'PROJECT:DATASET.TABLE'. \"\"\" if isinstance(destination, bigquery.TableReference): return", "a unique row ID string \"\"\" self._unique_row_id += 1 return", "temp_table = self._get_temp_table(project_id) try: self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest( projectId=project_id, datasetId=temp_table.datasetId)) except HttpError as", "WRITE_EMPTY or WRITE_TRUNCATE. Returns: A bigquery.Table instance if table was", "start_query_job would raise an error otherwise. return page_token = None", "for the query # request not for the actual execution", "written rows so we reduce communication with the # BigQuery", "self def __exit__(self, exception_type, exception_value, traceback): self.client.clean_up_temporary_dataset(self.executing_project) def __iter__(self): for", "True else: return False class AppendDestinationsFn(DoFn): \"\"\"Adds the destination to", "\"\"\"Gets or creates a table based on create and write", "when dumping NAN/INF values is: # ValueError: Out of range", "TableSchema of the BigQuery export from either the Query or", "\"\"\" Get the location of tables referenced in a query.", "serialized table schema, should be a valid JSON. Returns: A", "), jobReference=reference, ) ) response = self.client.jobs.Insert(request) return response.jobReference @retry.with_exponential_backoff(", "Raises: RuntimeError: For various mismatches between the state of the", "elif field.type == 'BOOLEAN': # Input: \"true\" --> Output: True", "executing project information. Please use the --project ' 'command line", "For testing scenarios where we pass in a client we", "\"\"\" Get the source location (e.g. ``\"EU\"`` or ``\"US\"``) from", "assume the argument already has # the proper formatting. return", "and all others should be inserted successfully. Returns: A tuple", "[] if not passed: raise RuntimeError('Could not successfully insert rows", "projectId=project_id, datasetId=dataset_id, tableId=table_id) try: self.client.tables.Delete(request) except HttpError as exn: if", "max so wait # that much time before creating the", "to automatically get it from gcloud config info. if not", "a bigquery.Table instance. 
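# ----------------------------------------------------------------------------
# Illustrative aside: the retry pattern this wrapper applies to its API
# calls. Transient server errors and timeouts are retried with exponential
# backoff up to MAX_RETRIES attempts, while errors handled explicitly (such
# as a 404 above) propagate to the caller. ``lookup_table`` is a
# hypothetical function, not part of this module.
from apache_beam.utils import retry


@retry.with_exponential_backoff(
    num_retries=3,
    retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def lookup_table(client, request):
  # Exceptions accepted by retry_filter trigger a backed-off re-invocation;
  # anything else is re-raised immediately.
  return client.tables.Get(request)
# ----------------------------------------------------------------------------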
return response @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_or_create_dataset(self,", "is not empty but write disposition is WRITE_EMPTY.' % (project_id,", "RowAsDictJsonCoder(coders.Coder): \"\"\"A coder for a table row (represented as a", "there are rows with insertion errors, whether they should be", "== 'BOOLEAN': # Input: \"true\" --> Output: True return value", "value is None: if not field.mode == 'NULLABLE': raise ValueError('Received", "Try to automatically get it from gcloud config info. if", "# if the query will return a large number of", "record itself. Experimental; no backwards compatibility guarantees. \"\"\" def __init__(self,", "was # specified. if write_disposition == BigQueryDisposition.WRITE_TRUNCATE: self._delete_table(project_id, dataset_id, table_id)", "tables referenced in a query. This method returns the location", "project_id, query, use_legacy_sql, flatten_results, dry_run=False): job_id = self._start_query_job(project_id, query, use_legacy_sql,", "during retries on failures. # TODO(silviuc): Must add support to", "Query or the Table. \"\"\" json_schema = json.loads(schema_string) def _parse_schema_field(field):", "schema.name = field['name'] schema.type = field['type'] if 'mode' in field:", "rows: if self.row_as_dict: yield self.client.convert_row_to_dict(row, schema) else: yield row class", "help BigQuery avoid inserting a row multiple times. # BigQuery", "that they have used NAN/INF values. try: return json.dumps( table_row,", "WRITE_EMPTY.' % (project_id, dataset_id, table_id)) # Delete the table and", "* FROM [%s:%s.%s];' % ( project_id, self.source.table_reference.datasetId, self.source.table_reference.tableId) elif self.source.query", "to a Python dict.\"\"\" result = {} for index, field", "= project_id request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( copy=bigquery.JobConfigurationTableCopy( destinationTable=to_table_reference,", "the Apache License, Version 2.0 # (the \"License\"); you may", "project, job_id, location=None): request = bigquery.BigqueryJobsGetRequest() request.jobId = job_id request.projectId", "is None: match = re.match( r'^((?P<project>.+):)?(?P<dataset>\\w+)\\.(?P<table>[\\w\\$]+)$', table) if not match:", "is None: # This behavior is only expected in tests", "# (the \"License\"); you may not use this file except", "import HttpError except ImportError: pass # pylint: enable=wrong-import-order, wrong-import-position MAX_RETRIES", "AppendDestinationsFn(DoFn): \"\"\"Adds the destination to an element, making it a", "should be a valid JSON. Returns: A TableSchema of the", "query request times out # (default is 10 seconds). Note", "raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR)) def decode(self, encoded_table_row): return", "not specified, the expectation is that the # table argument", "BigQueryWriter(dataflow_io.NativeSinkWriter): \"\"\"The sink writer for a BigQuerySink.\"\"\" def __init__(self, sink,", "# Licensed to the Apache Software Foundation (ASF) under one", "bigquery library is not available. # pylint: disable=wrong-import-order, wrong-import-position try:", "def _delete_table(self, project_id, dataset_id, table_id): request = bigquery.BigqueryTablesDeleteRequest( projectId=project_id, datasetId=dataset_id,", "table lookup parameters Returns: bigquery.Table instance Raises: HttpError if lookup", "bigquery.Table instance Raises: HttpError if lookup failed. 
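# ----------------------------------------------------------------------------
# Illustrative aside: the get-or-create idiom used by the dataset and table
# helpers, distilled into a hypothetical standalone function. A 404 from
# the lookup means "does not exist yet" and is answered by creating the
# resource; any other HttpError is re-raised.
from apitools.base.py.exceptions import HttpError


def get_or_create(get_fn, create_fn):
  try:
    return get_fn()
  except HttpError as exn:
    if exn.status_code == 404:
      return create_fn()
    raise
# ----------------------------------------------------------------------------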
\"\"\" request =", "gets read from the table. It is not required #", "The response is a bigquery.Dataset instance. return response else: raise", "in compliance with # the License. You may obtain a", "return destination def parse_table_schema_from_json(schema_string): \"\"\"Parse the Table Schema provided as", "flattenResults=flatten_results)), jobReference=reference)) response = self.client.jobs.Insert(request) return response.jobReference.jobId @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "if not response.jobComplete: # The jobComplete field can be False", "TODO(silviuc): Try to automatically get it from gcloud config info.", "a PCollection of KV-pairs where the key is a TableReference", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "by query %s\", location, table, query) return location logging.debug(\"Query %s", "if response.statistics is None: # This behavior is only expected", "A bigquery.TableSchema instance or None. create_disposition: CREATE_NEVER or CREATE_IF_NEEDED. write_disposition:", "multiple times. # BigQuery will do a best-effort if unique", "bigquery.Table instance. return response @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_or_create_dataset(self, project_id,", "job to load data into BigQuery. Returns: bigquery.JobReference with the", "apache_beam.internal.gcp import auth from apache_beam.internal.gcp.json_value import from_json_value from apache_beam.internal.gcp.json_value import", "dataset_id, location=None): # Check if dataset already exists otherwise create", "the value for the field %s ' 'but the field", "BigQuery will make a best effort to not insert the", "BigQueryDisposition.WRITE_TRUNCATE we delete # the table before this point. if", "in this file are experimental and have no backwards compatibility", "BigQuery. Returns: bigquery.JobReference with the information about the job that", "create.') # TODO(BEAM-2673): Remove this sleep by migrating to load", "isinstance(table, value_provider.ValueProvider): return table table_reference = bigquery.TableReference() # If dataset", "not successfully insert rows to BigQuery' ' table [%s:%s.%s]. Errors:", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "not found but create disposition is CREATE_NEVER.' % (project_id, dataset_id,", "self.executing_project = None # TODO(silviuc): Try to automatically get it", "--> Output: \"2016-11-03 00:49:36.985864 UTC\" dt = datetime.datetime.utcfromtimestamp(float(value)) return dt.strftime('%Y-%m-%d", "raised, the BigQuerySource \"modes\" have # changed and this method", "not exist', project_id, dataset_id) return else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "self.sink = sink self.test_bigquery_client = test_bigquery_client self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)", "dumping NAN/INF values is: # ValueError: Out of range float", "contributor license agreements. 
See the NOTICE file distributed with #", "= auth.executing_project elif hasattr(source, 'pipeline_options'): self.executing_project = ( source.pipeline_options.view_as(GoogleCloudOptions).project) else:", "= bigquery.BigqueryTablesInsertRequest( projectId=project_id, datasetId=dataset_id, table=table) response = self.client.tables.Insert(request) logging.debug(\"Created the", "self.client.convert_row_to_dict(row, schema) else: yield row class BigQueryWriter(dataflow_io.NativeSinkWriter): \"\"\"The sink writer", "api time.sleep(150) return created_table else: return created_table def run_query(self, project_id,", "if isinstance(obj, decimal.Decimal): return str(obj) raise TypeError( \"Object of type", "found or created. Raises: RuntimeError: For various mismatches between the", "between the state of the table and the create/write dispositions", "on response from query: %s ...', query) time.sleep(1.0) continue #", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "It's a query source return self.client.get_query_location( self.executing_project, self.source.query, self.source.use_legacy_sql) def", "referenced in a query. This method returns the location of", "%r referenced by query %s\", location, table, query) return location", "skip_invalid_rows: If there are rows with insertion errors, whether they", "self.table_id, self.sink.table_schema, self.sink.create_disposition, self.sink.write_disposition) return self def __exit__(self, exception_type, exception_value,", "\"\"\" if self.source.table_reference is not None: tr = self.source.table_reference return", "value elif field.type == 'BOOLEAN': # Input: \"true\" --> Output:", "value_provider.StaticValueProvider(lambda x: x, value=elm) @staticmethod def _get_table_fn(destination): if callable(destination): return", "0 @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _delete_table(self, project_id, dataset_id, table_id): request", "else: table_reference.projectId = project table_reference.datasetId = dataset table_reference.tableId = table", "schema is None and found_table is None: raise RuntimeError( 'Table", "flatten_results flag as False to the source return self.convert_row_to_dict(value, field)", "or ``\"US\"``) from either - :data:`source.table_reference` or - The first", "tr.projectId if tr.projectId is not None else self.executing_project, tr.datasetId, tr.tableId)", "not JSON compliant # This code will catch this error", "schema, should be a valid JSON. Returns: A TableSchema of", "# the table before this point. if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:", "specified then an error will be raised since the table", "get_new_http from apache_beam.io.gcp.internal.clients import bigquery from apache_beam.options import value_provider from", "True) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_job(self, project, job_id, location=None): request", "normal error when dumping NAN/INF values is: # ValueError: Out", "+ self._temporary_table_suffix, project=project_id) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_query_location(self, project_id, query,", "or created. Raises: RuntimeError: For various mismatches between the state", "to the old table for 2 mins max so wait", "is supported, e.g. 'DATASET.TABLE$YYYYmmdd'. dataset: The ID of the dataset", "ignoreUnknownValues? 
rows=rows)) response = self.client.tabledata.InsertAll(request) # response.insertErrors is not []", "supports also a RECORD type. However # when querying, the", "_start_query_job(self, project_id, query, use_legacy_sql, flatten_results, job_id, dry_run=False): reference = bigquery.JobReference(jobId=job_id,", "following attributes: projectId, datasetId, and tableId. Raises: ValueError: if the", "enumerate(schema.fields): value = None if isinstance(schema, bigquery.TableSchema): cell = row.f[index]", "ID of the project containing this table or null if", "raise ValueError(\"BigQuerySource must have either a table or query\") def", "num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _is_table_empty(self, project_id, dataset_id, table_id): request = bigquery.BigqueryTabledataListRequest(", "JSON string. This is the default coder for sources and", "row class BigQueryWriter(dataflow_io.NativeSinkWriter): \"\"\"The sink writer for a BigQuerySink.\"\"\" def", "argument can be a bigquery.TableReference instance in which case dataset", "float values are not JSON compliant # This code will", "and -INF values are not JSON compliant.' def default_encoder(obj): if", "projectId, datasetId, and tableId. Or a string representing the destination", "of # bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as # required by the InsertAll()", "project_id, dataset_id, table_id, schema or found_table.schema, created_table) # if write_disposition", "- :meth:`BigQueryWrapper.get_table_location` Returns: Optional[str]: The source location, if any. \"\"\"", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "' 'DATASET.TABLE) instead of %s.' % table) table_reference.projectId = match.group('project')", "a (project, dataset, table) tuple. Args: table: The ID of", "# randomized prefix for row IDs. self._row_id_prefix = '' if", "get_or_create_dataset(self, project_id, dataset_id, location=None): # Check if dataset already exists", "project_id, dataset_id, delete_contents=True): request = bigquery.BigqueryDatasetsDeleteRequest( projectId=project_id, datasetId=dataset_id, deleteContents=delete_contents) try:", "(PROJECT:DATASET.TABLE or ' 'DATASET.TABLE) instead of %s.' % table) table_reference.projectId", "yield response.rows, response.schema if not response.pageToken: break page_token = response.pageToken", "'invalidQuery', 'notImplemented'} @staticmethod def should_retry(strategy, error_message): if strategy == RetryStrategy.RETRY_ALWAYS:", "result, errors def _convert_cell_value_to_dict(self, value, field): if field.type == 'STRING':", "IDs. self._row_id_prefix = '' if client else uuid.uuid4() self._temporary_table_suffix =", "because the ' 'table does not exist.' % (project_id, dataset_id,", "specific language governing permissions and # limitations under the License.", "in field: schema.mode = field['mode'] else: schema.mode = 'NULLABLE' if", "returns the location of the first referenced table in the", "We got some results. The last page is signalled by", "# Input: 1478134176.985864 --> Output: \"2016-11-03 00:49:36.985864 UTC\" dt =", "None: self.query = self.source.query else: # Enforce the \"modes\" enforced", "None: # This behavior is only expected in tests logging.warning(", "value] elif value is None: if not field.mode == 'NULLABLE':", "will create it as temporary ' 'with location=%s', project_id, dataset_id,", "be issued several times. 
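# ----------------------------------------------------------------------------
# Illustrative aside: how one payload dict becomes a RowsValueListEntry for
# the InsertAll() request built above. The per-row ``insertId`` is what
# lets the service deduplicate rows when an insert is retried, and Decimal
# values are shipped as string attributes because JSON cannot carry
# NUMERIC's precision (BigQuery accepts string attrs for NUMERIC columns).
# A sketch with an illustrative function name, mirroring the row
# preparation done in insert_rows.
import decimal

from future.utils import iteritems

from apache_beam.internal.gcp.json_value import to_json_value
from apache_beam.io.gcp.internal.clients import bigquery


def make_entry(unique_id, row_dict):
  json_object = bigquery.JsonObject()
  for k, v in iteritems(row_dict):
    if isinstance(v, decimal.Decimal):
      v = str(v)
    json_object.additionalProperties.append(
        bigquery.JsonObject.AdditionalProperty(key=k, value=to_json_value(v)))
  return bigquery.TableDataInsertAllRequest.RowsValueListEntry(
      insertId=unique_id, json=json_object)
# ----------------------------------------------------------------------------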
This comes into play for sinks", "retry_filter=retry.retry_on_server_errors_and_timeout_filter) def create_temporary_dataset(self, project_id, location): dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix", "a row multiple times. # BigQuery will do a best-effort", "\"License\"); you may not use this file except in compliance", "except in compliance with # the License. You may obtain", "# Note that a schema field object supports also a", "insertions. If the row ID is provided, BigQuery will make", "request not for the actual execution of the query in", "request.location = location return self.client.jobs.Get(request) def perform_load_job(self, destination, files, job_id,", "None: raise RuntimeError( 'Missing executing project information. Please use the", "CREATE_NEVER.' % (project_id, dataset_id, table_id)) else: raise # If table", "errors). If first element is False then the second element", "instance in which case dataset and project are ignored and", "is used to organize all the BigQuery integration points and", "return else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table_location(self, project_id, dataset_id,", "NoOp, because we assume the argument already has # the", "flatten_results=self.flatten_results): if self.schema is None: self.schema = schema for row", "Returns: a unique row ID string \"\"\" self._unique_row_id += 1", "RetryStrategy._NON_TRANSIENT_ERRORS): return True else: return False class AppendDestinationsFn(DoFn): \"\"\"Adds the", "not None: dataset.location = location request = bigquery.BigqueryDatasetsInsertRequest( projectId=project_id, dataset=dataset)", "table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix, dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix, project=project_id) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "get_table(self, project_id, dataset_id, table_id): \"\"\"Lookup a table's metadata object. Args:", "project_id: The project id owning the table. dataset_id: The dataset", "started. \"\"\" return self._insert_load_job( destination.projectId, job_id, destination, files, schema=schema, create_disposition=create_disposition,", "state of the table and the create/write dispositions passed in.", "If first element is False then the second element will", "in tests logging.warning( \"Unable to get location, missing response.statistics. Query:", "callable(destination): return destination else: return lambda x: AppendDestinationsFn._value_provider_or_static_val( destination).get() def", "dataset_id, table_id)) if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE: return found_table", "by the InsertAll() method. request = bigquery.BigqueryTabledataInsertAllRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id,", "in python 2.7 so we'll just hardcode it as we're", "automatically get it from gcloud config info. 
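# ----------------------------------------------------------------------------
# Illustrative aside (assumed shape, matching the unique row ID logic): a
# random per-writer prefix plus a monotonically increasing counter. The ID
# is generated once when the insert request is built, so retries of that
# same request reuse it and BigQuery can drop the duplicates.
import itertools
import uuid

_row_id_prefix = uuid.uuid4()
_counter = itertools.count(1)


def next_row_id():
  return '%s_%d' % (_row_id_prefix, next(_counter))
# ----------------------------------------------------------------------------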
if not self.executing_project", "HttpError as exn: if exn.status_code == 404: dataset_reference = bigquery.DatasetReference(", "jobReference=reference)) response = self.client.jobs.Insert(request) return response.jobReference.jobId @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def", "), jobReference=reference, ) ) logging.info(\"Inserting job request: %s\", request) response", "NOTICE file distributed with # this work for additional information", "well. raise ValueError(\"BigQuerySource must have either a table or query\")", "request) response = self.client.jobs.Insert(request) logging.info(\"Response was %s\", response) return response.jobReference", "None else self.executing_project, tr.datasetId, tr.tableId) else: # It's a query", "logging.warning('Table %s:%s.%s does not exist', project_id, dataset_id, table_id) return else:", "try: self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest( projectId=project_id, datasetId=dataset_id)) if project_id is not None: #", "migrating to load api time.sleep(150) return created_table else: return created_table", "dry_run=dry_run) if dry_run: # If this was a dry run", "field can be False if the query request times out", "= location return self.client.jobs.Get(request) def perform_load_job(self, destination, files, job_id, schema=None,", "GoogleCloudOptions from apache_beam.runners.dataflow.native_io import iobase as dataflow_io from apache_beam.transforms import", "self.rows_buffer.append(row) if len(self.rows_buffer) > self.rows_buffer_flush_threshold: self._flush_rows_buffer() class RowAsDictJsonCoder(coders.Coder): \"\"\"A coder", "'PROJECT:DATASET.TABLE'. This argument can be a bigquery.TableReference instance in which", "JSON compliant.' def default_encoder(obj): if isinstance(obj, decimal.Decimal): return str(obj) raise", "TEMP_TABLE = 'temp_table_' TEMP_DATASET = 'temp_dataset_' def __init__(self, client=None): self.client", "def _get_query_results(self, project_id, job_id, page_token=None, max_results=10000): request = bigquery.BigqueryJobsGetQueryResultsRequest( jobId=job_id,", "does not reference any tables.\", query) return None @retry.with_exponential_backoff( num_retries=MAX_RETRIES,", "isinstance(self.sink.coder, RowAsDictJsonCoder) # Buffer used to batch written rows so", "import iobase as dataflow_io from apache_beam.transforms import DoFn from apache_beam.utils", "value, field): if field.type == 'STRING': # Input: \"XYZ\" -->", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "% ( project_id, self.source.table_reference.datasetId, self.source.table_reference.tableId) elif self.source.query is not None:", "if not response.pageToken: break page_token = response.pageToken def insert_rows(self, project_id,", "--> Output: 123 return int(value) elif field.type == 'FLOAT': #", "get_table_location(self, project_id, dataset_id, table_id): table = self.get_table(project_id, dataset_id, table_id) return", "self.project_id, self.dataset_id, self.table_id, self.sink.table_schema, self.sink.create_disposition, self.sink.write_disposition) return self def __exit__(self,", "= ( sink.pipeline_options.view_as(GoogleCloudOptions).project) assert self.project_id is not None self.dataset_id =", "response.rows, response.schema if not response.pageToken: break page_token = response.pageToken def", "page is signalled by a missing pageToken. 
yield response.rows, response.schema", "or bigquery.BigqueryV2( http=get_new_http(), credentials=auth.get_service_credentials(), response_encoding=None if sys.version_info[0] < 3 else", "json=json_object)) result, errors = self._insert_all_rows( project_id, dataset_id, table_id, final_rows, skip_invalid_rows)", "it from gcloud config info. if not self.executing_project and test_bigquery_client", "a BigQuerySink.\"\"\" def __init__(self, sink, test_bigquery_client=None, buffer_size=None): self.sink = sink", "ASF licenses this file to You under the Apache License,", "\"\"\"Parse a single schema field from dictionary. Args: field: Dictionary", "(default is 10 seconds). Note that this is a timeout", "None while True: response = self._get_query_results(project_id, job_id, page_token) if not", "self.project_id, self.dataset_id, self.table_id) passed, errors = self.client.insert_rows( project_id=self.project_id, dataset_id=self.dataset_id, table_id=self.table_id,", "logging.info(\"Response was %s\", response) return response.jobReference @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def", "= row['f'][index] value = cell['v'] if 'v' in cell else", "result, errors = self._insert_all_rows( project_id, dataset_id, table_id, final_rows, skip_invalid_rows) return", "query and depends on the BigQuery service to provide error", "\"2016-11-03T00:49:36\" --> Output: \"2016-11-03T00:49:36\" return value elif field.type == 'TIME':", "Returns: bigquery.JobReference with the information about the job that was", "projectId=project_id, datasetId=dataset_id, tableId=table_id, maxResults=1) response = self.client.tabledata.List(request) # The response", "unique IDs are provided. This situation # can happen during", "exn: if exn.status_code == 404: logging.warning( 'Dataset %s:%s does not", "= response.pageToken def insert_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False): \"\"\"Inserts", "is the default coder for sources and sinks if the", "errors. \"\"\" # Prepare rows for insertion. Of special note", "skip_invalid_rows=False): \"\"\"Inserts rows into the specified table. Args: project_id: The", "to BigQuery' ' table [%s:%s.%s]. Errors: %s' % (self.project_id, self.dataset_id,", "useLegacySql=use_legacy_sql, )), jobReference=reference)) response = self.client.jobs.Insert(request) if response.statistics is None:", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results, job_id,", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter) def _insert_all_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False):", "not use this file except in compliance with # the", "object containing serialized schema. Returns: A TableFieldSchema for a single", "# This behavior is only expected in tests logging.warning( \"Unable", "schema=schema, writeDisposition=write_disposition, createDisposition=create_disposition, sourceFormat='NEWLINE_DELIMITED_JSON', autodetect=schema is None, ) ), jobReference=reference,", "will need to be updated as well. raise ValueError(\"BigQuerySource must", "# Check if dataset exists to make sure that the", "the Table. \"\"\" json_schema = json.loads(schema_string) def _parse_schema_field(field): \"\"\"Parse a", "seconds). 
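# ----------------------------------------------------------------------------
# Illustrative aside: the pagination idiom used when reading results,
# distilled. Every page except the last carries a pageToken, so the loop
# feeds each token back into the next request until it is absent.
# ``fetch_page`` is a hypothetical callable standing in for the
# query-results request.
def iter_pages(fetch_page):
  page_token = None
  while True:
    response = fetch_page(page_token)
    yield response
    if not response.pageToken:
      break
    page_token = response.pageToken
# ----------------------------------------------------------------------------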
Note that this is a timeout for the query", "instead of dicts. final_rows = [] for row in rows:", "Outputs a PCollection of KV-pairs where the key is a", "Returns: A tuple (bool, errors). If first element is False", "bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as # required by the InsertAll() method. request", "None try: found_table = self.get_table(project_id, dataset_id, table_id) except HttpError as", "BigQueryWriter. class BigQueryReader(dataflow_io.NativeSourceReader): \"\"\"A reader for a BigQuery source.\"\"\" def", "value=elm) @staticmethod def _get_table_fn(destination): if callable(destination): return destination else: return", "from __future__ import absolute_import import datetime import decimal import json", "times out # (default is 10 seconds). Note that this", "\"YmJi\" return value elif field.type == 'DATE': # Input: \"2016-11-03\"", "type(obj).__name__) def get_hashable_destination(destination): \"\"\"Parses a table reference into a (project,", "auth.executing_project elif hasattr(source, 'pipeline_options'): self.executing_project = ( source.pipeline_options.view_as(GoogleCloudOptions).project) else: self.executing_project", "def __init__(self, sink, test_bigquery_client=None, buffer_size=None): self.sink = sink self.test_bigquery_client =", "response = self.client.jobs.Insert(request) return response.jobReference.jobId @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _get_query_results(self,", "def clean_up_temporary_dataset(self, project_id): temp_table = self._get_temp_table(project_id) try: self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest( projectId=project_id, datasetId=temp_table.datasetId))", "schema=schema or found_table.schema) logging.info('Created table %s.%s.%s with schema %s. Result:", "exception_value, traceback): self._flush_rows_buffer() def Write(self, row): self.rows_buffer.append(row) if len(self.rows_buffer) >", "\"\"\" def encode(self, table_row): # The normal error when dumping", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def create_temporary_dataset(self, project_id, location): dataset_id = BigQueryWrapper.TEMP_DATASET", "will be raised since the table was expected to be", "sinks (e.g., find and create tables, query a table, etc.).", "dataset_id, table_id): request = bigquery.BigqueryTablesDeleteRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id) try: self.client.tables.Delete(request)", "rows. logging.info('Waiting on response from query: %s ...', query) time.sleep(1.0)", "field.type == 'DATETIME': # Input: \"2016-11-03T00:49:36\" --> Output: \"2016-11-03T00:49:36\" return", "value elif field.type == 'DATETIME': # Input: \"2016-11-03T00:49:36\" --> Output:", "not match: raise ValueError( 'Expected a table reference (PROJECT:DATASET.TABLE or", "no backwards compatibility guarantees. 
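# ----------------------------------------------------------------------------
# Illustrative aside: the polling loop this implies. jobComplete == False
# only means the per-request deadline (about 10 seconds by default)
# expired before results were ready, not that the query failed, so the
# correct reaction is to sleep briefly and ask again. ``get_results`` is a
# hypothetical callable.
import time


def wait_for_results(get_results):
  while True:
    response = get_results()
    if response.jobComplete:
      return response
    time.sleep(1.0)
# ----------------------------------------------------------------------------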
These tools include wrappers and clients", "BigQueryDisposition.WRITE_TRUNCATE: return found_table else: created_table = self._create_table(project_id=project_id, dataset_id=dataset_id, table_id=table_id, schema=schema", "# # Licensed to the Apache Software Foundation (ASF) under", "bigquery.JsonObject() for k, v in iteritems(row): if isinstance(v, decimal.Decimal): #", "we will create it as temporary ' 'with location=%s', project_id,", "# Input: \"true\" --> Output: True return value == 'true'", "elif field.type == 'NUMERIC': return decimal.Decimal(value) elif field.type == 'GEOGRAPHY':", "Returns: A TableReference object from the bigquery API. The object", "job_id, page_token=None, max_results=10000): request = bigquery.BigqueryJobsGetQueryResultsRequest( jobId=job_id, pageToken=page_token, projectId=project_id, maxResults=max_results)", "is not [] if errors encountered. return not response.insertErrors, response.insertErrors", "A TableFieldSchema for a single column in BigQuery. \"\"\" schema", "self.executing_project = auth.executing_project elif hasattr(source, 'pipeline_options'): self.executing_project = ( source.pipeline_options.view_as(GoogleCloudOptions).project)", "Optional[str]: The source location, if any. \"\"\" if self.source.table_reference is", "on the BigQuery service to provide error handling for queries", "we're reading using # utcfromtimestamp. # Input: 1478134176.985864 --> Output:", "query has no errors. The start_query_job would raise an error", "maxResults=1) response = self.client.tabledata.List(request) # The response is a bigquery.TableDataList", "def _get_source_location(self): \"\"\" Get the source location (e.g. ``\"EU\"`` or", "create_disposition=create_disposition, write_disposition=write_disposition) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_or_create_table( self, project_id, dataset_id,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "# bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as # required by the InsertAll() method.", "query %s\", location, table, query) return location logging.debug(\"Query %s does", "found_table = self.get_table(project_id, dataset_id, table_id) except HttpError as exn: if", "Input: \"00:49:36\" --> Output: \"00:49:36\" return value elif field.type ==", "the fact that we get here means the # query", "== 'GEOGRAPHY': return value else: raise RuntimeError('Unexpected field type: %s'", "if (not table_empty and write_disposition == BigQueryDisposition.WRITE_EMPTY): raise RuntimeError( 'Table", "RETRY_ALWAYS = 'RETRY_ALWAYS' RETRY_NEVER = 'RETRY_NEVER' RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR' _NON_TRANSIENT_ERRORS", "where retry logic for failures can be controlled. In addition", "a TableRow instance using the schema to a Python dict.\"\"\"", "have # changed and this method will need to be", "it a KV pair. 
Outputs a PCollection of KV-pairs where", "is None then the table argument must contain the entire", "else: return False class AppendDestinationsFn(DoFn): \"\"\"Adds the destination to an", "import from_json_value from apache_beam.internal.gcp.json_value import to_json_value from apache_beam.internal.http_client import get_new_http", "datasetId=dataset_id, tableId=table_id, tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest( skipInvalidRows=skip_invalid_rows, # TODO(silviuc): Should have an option", "123 return int(value) elif field.type == 'FLOAT': # Input: \"1.23\"", "into NUMERIC columns by receiving JSON with string attrs. v", "response = self.client.tabledata.InsertAll(request) # response.insertErrors is not [] if errors", "destination, files, job_id, schema=None, write_disposition=None, create_disposition=None): \"\"\"Starts a job to", "into string because JSON does not # support the precision", "is the row ID that we add to # each", "and the value is the record itself. Experimental; no backwards", "we add to # each row in order to help", "return destination else: return lambda x: AppendDestinationsFn._value_provider_or_static_val( destination).get() def process(self,", "writing it logging.warning('Sleeping for 150 seconds before the write as", "the # BigQuery service. self.rows_buffer = [] self.rows_buffer_flush_threshold = buffer_size", "self.get_table(project_id, dataset_id, table_id) except HttpError as exn: if exn.status_code ==", "error will be raised since the table was expected to", "instance. return response.totalRows == 0 @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _delete_table(self,", "self.client.get_table_location( tr.projectId if tr.projectId is not None else self.executing_project, tr.datasetId,", "jobReference=reference, ) ) logging.info(\"Inserting job request: %s\", request) response =", "WRITE_TRUNCATE write dispositions. if found_table: table_empty = self._is_table_empty(project_id, dataset_id, table_id)", "uuid from builtins import object from future.utils import iteritems from", "= dataset table_reference.tableId = table return table_reference # ----------------------------------------------------------------------------- #", "= 'NAN, INF and -INF values are not JSON compliant.'", "creating the table and writing it logging.warning('Sleeping for 150 seconds", "because JSON does not # support the precision that decimal", "BigQueryWrapper(client=self.test_bigquery_client) self.client.get_or_create_table( self.project_id, self.dataset_id, self.table_id, self.sink.table_schema, self.sink.create_disposition, self.sink.write_disposition) return self", "# this work for additional information regarding copyright ownership. #", "raise ValueError( 'Expected a table reference (PROJECT:DATASET.TABLE or ' 'DATASET.TABLE)", "%s:%s.%s table.', len(self.rows_buffer), self.project_id, self.dataset_id, self.table_id) passed, errors = self.client.insert_rows(", "flattened # unless we pass the flatten_results flag as False", "have either a table or query\") def _get_source_location(self): \"\"\" Get", "schema.mode = field['mode'] else: schema.mode = 'NULLABLE' if 'description' in", "The rows argument is a list of # bigquery.TableDataInsertAllRequest.RowsValueListEntry instances", "' 'command line option to specify it.') self.row_as_dict = isinstance(self.source.coder,", "not NULLABLE.' 
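# ----------------------------------------------------------------------------
# Illustrative aside: the essence of AppendDestinationsFn as a minimal
# sketch. However ``destination`` was supplied (callable, ValueProvider or
# static string), it is normalized to a callable and evaluated per element,
# so downstream grouping can batch rows by destination table. The class
# below is a simplified stand-in, not the module's own implementation.
from apache_beam.transforms import DoFn


class _PairWithDestination(DoFn):
  def __init__(self, destination_fn):
    self.destination_fn = destination_fn

  def process(self, element):
    # Emit (destination, record) KV pairs.
    yield self.destination_fn(element), element
# ----------------------------------------------------------------------------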
% field.name) result[field.name] = None else: result[field.name] =", "skipInvalidRows=skip_invalid_rows, # TODO(silviuc): Should have an option for ignoreUnknownValues? rows=rows))", "def decode(self, encoded_table_row): return json.loads(encoded_table_row.decode('utf-8')) class RetryStrategy(object): RETRY_ALWAYS = 'RETRY_ALWAYS'", "string. Args: schema_string: String serialized table schema, should be a", "% (project_id, dataset_id, table_id)) if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:", "Schema for the rows being read by the reader. It", "did not define a project we default to executing #", "schema) else: yield row class BigQueryWriter(dataflow_io.NativeSinkWriter): \"\"\"The sink writer for", "table) tuple. Args: destination: Either a TableReference object from the", "list of # bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as # required by the", "The UTC should come from the timezone library but this", "if dataset exists to make sure that the temporary id", "sink, test_bigquery_client=None, buffer_size=None): self.sink = sink self.test_bigquery_client = test_bigquery_client self.row_as_dict", "# Input: \"00:49:36\" --> Output: \"00:49:36\" return value elif field.type", "import sys import time import uuid from builtins import object", "self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder) # Buffer used to batch written", "--project ' 'command line option to specify it.') self.row_as_dict =", "values in each row but could be useful for #", "table_reference # ----------------------------------------------------------------------------- # BigQueryWrapper. class BigQueryWrapper(object): \"\"\"BigQuery client wrapper", "table with id %s\", table_id) # The response is a", "import iteritems from apache_beam import coders from apache_beam.internal.gcp import auth", "dataset_id, table_id): request = bigquery.BigqueryTabledataListRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, maxResults=1) response", "Foundation (ASF) under one or more # contributor license agreements.", "execution of the query in the service. If # the", "isinstance(v, decimal.Decimal): # decimal values are converted into string because", "for a table row (represented as a dict) to/from a", "Raises: ValueError: if the table reference as a string does", "instance if table was found or created. Raises: RuntimeError: For", "(strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and error_message not in RetryStrategy._NON_TRANSIENT_ERRORS): return True", "flatten_results, job_id, dry_run=False): reference = bigquery.JobReference(jobId=job_id, projectId=project_id) request = bigquery.BigqueryJobsInsertRequest(", "= field['name'] schema.type = field['type'] if 'mode' in field: schema.mode", "create_disposition == BigQueryDisposition.CREATE_NEVER: raise RuntimeError( 'Table %s:%s.%s not found but", "(bool, errors). If first element is False then the second", "table. dataset_id: The dataset id owning the table. table_id: The", "json.dumps( table_row, allow_nan=False, default=default_encoder).encode('utf-8') except ValueError as e: raise ValueError('%s.", "'NAN, INF and -INF values are not JSON compliant.' def", "to organize all the BigQuery integration points and offer a", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "result. Additionally, for date partitioned tables, appending '$YYYYmmdd' to the", "precision that decimal supports. BQ is able to handle #", "Experimental; no backwards compatibility guarantees. 
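# ----------------------------------------------------------------------------
# Illustrative aside: the three cases the cell conversion above has to
# distinguish, in order. ``convert`` stands in for the per-type value
# conversion and ``field`` for a table field schema; a hypothetical helper,
# not the module's own code.
def cell_to_python(value, field, convert):
  if field.mode == 'REPEATED':
    # Repeated cells arrive as [{'v': ...}, ...]; an absent one as [].
    return [convert(x['v'], field) for x in (value or [])]
  if value is None:
    if field.mode != 'NULLABLE':
      raise ValueError('Received \'None\' as the value for the field %s '
                       'but the field is not NULLABLE.' % field.name)
    return None
  return convert(value, field)
# ----------------------------------------------------------------------------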
\"\"\" def __init__(self, destination): self.destination", "the table reference is specified entirely by the table (and", "raise self._delete_dataset(temp_table.projectId, temp_table.datasetId, True) @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_job(self, project,", "an empty list result[field.name] = [] else: result[field.name] = [self._convert_cell_value_to_dict(x['v'],", "def _convert_cell_value_to_dict(self, value, field): if field.type == 'STRING': # Input:", "if write_disposition == BigQueryDisposition.WRITE_TRUNCATE: self._delete_table(project_id, dataset_id, table_id) # Create a", "request = bigquery.BigqueryTabledataInsertAllRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest( skipInvalidRows=skip_invalid_rows, # TODO(silviuc):", "Classes, constants and functions in this file are experimental and", "returned as a result. Additionally, for date partitioned tables, appending", "before creating the table and writing it logging.warning('Sleeping for 150", "keep trying. This situation is quite possible # if the", "\"\"\" json_schema = json.loads(schema_string) def _parse_schema_field(field): \"\"\"Parse a single schema", "dataset_id, table_id) return else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _delete_dataset(self,", "self.client.jobs.GetQueryResults(request) return response @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter) def _insert_all_rows(self, project_id, dataset_id,", "of the BigQuery export from either the Query or the", "if found_table: table_empty = self._is_table_empty(project_id, dataset_id, table_id) if (not table_empty", "used as temporary.' % (project_id, dataset_id)) except HttpError as exn:", "tables.\", query) return None @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _insert_copy_job(self, project_id,", "both in sources and sinks (e.g., find and create tables,", "BigQueryDisposition found_table = None try: found_table = self.get_table(project_id, dataset_id, table_id)", "element will be a bigquery.InserttErrorsValueListEntry instance containing specific errors. \"\"\"", "a row and each key in it is the name", "The source location, if any. \"\"\" if self.source.table_reference is not", "test_bigquery_client if auth.is_running_in_gce: self.executing_project = auth.executing_project elif hasattr(source, 'pipeline_options'): self.executing_project", "environments where bigquery library is not available. # pylint: disable=wrong-import-order,", "# The rows argument is a list of # bigquery.TableDataInsertAllRequest.RowsValueListEntry", "See the NOTICE file distributed with # this work for", "either express or implied. # See the License for the", "create disposition is CREATE_NEVER.' % (project_id, dataset_id, table_id)) else: raise", "rows for insertion. Of special note is the row ID", "# utcfromtimestamp. 
# Input: 1478134176.985864 --> Output: \"2016-11-03 00:49:36.985864 UTC\"", "to You under the Apache License, Version 2.0 # (the", "datasetId=dataset_id)) if project_id is not None: # Unittests don't pass", "project_id, dataset_id, table_id): request = bigquery.BigqueryTabledataListRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, maxResults=1)", "is unique try: self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest( projectId=project_id, datasetId=dataset_id)) if project_id is not", "or more # contributor license agreements. See the NOTICE file", "self._delete_table(project_id, dataset_id, table_id) # Create a new table potentially reusing", "table.', len(self.rows_buffer), self.project_id, self.dataset_id, self.table_id) passed, errors = self.client.insert_rows( project_id=self.project_id,", "x, value=elm) @staticmethod def _get_table_fn(destination): if callable(destination): return destination else:", "project_id = self.source.table_reference.projectId if not project_id: project_id = self.executing_project self.query", "# decimal values are converted into string because JSON does", "wrong-import-position MAX_RETRIES = 3 JSON_COMPLIANCE_ERROR = 'NAN, INF and -INF", "not insert the same row multiple times for fail and", "{} for index, field in enumerate(schema.fields): value = None if", "re import sys import time import uuid from builtins import", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _insert_copy_job(self, project_id, job_id, from_table_reference, to_table_reference, create_disposition=None,", "if the table reference is specified entirely by the table", "the actual execution of the query in the service. If", "skipped, and all others should be inserted successfully. Returns: A", "compliant # This code will catch this error to emit", "# Ideally this should never happen as repeated fields default", "the expected format. \"\"\" if isinstance(table, bigquery.TableReference): return table elif", "THIS FILE HAS BACKWARDS COMPATIBILITY GUARANTEES. \"\"\" from __future__ import", "location, table, query) return location logging.debug(\"Query %s does not reference", "Returns: A TableSchema of the BigQuery export from either the", "table was expected to be empty. \"\"\" from apache_beam.io.gcp.bigquery import", "if 'v' in cell else None if field.mode == 'REPEATED':", "@retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _get_query_results(self, project_id, job_id, page_token=None, max_results=10000): request", "by BigQuery sources and sinks. Classes, constants and functions in", "% (self.project_id, self.dataset_id, self.table_id, errors)) def __enter__(self): self.client = BigQueryWrapper(client=self.test_bigquery_client)", "RuntimeError('Unexpected field type: %s' % field.type) def convert_row_to_dict(self, row, schema):", "# ----------------------------------------------------------------------------- # BigQueryReader, BigQueryWriter. class BigQueryReader(dataflow_io.NativeSourceReader): \"\"\"A reader for", "# TODO(silviuc): Try to automatically get it from gcloud config", "to executing # project. 
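# ----------------------------------------------------------------------------
# Illustrative aside: the TIMESTAMP conversion above, runnable in
# isolation. The export hands back epoch seconds as a string, and
# utcfromtimestamp plus a hardcoded ' UTC' suffix reproduces BigQuery's
# canonical rendering (the timezone library is deliberately avoided here).
import datetime

dt = datetime.datetime.utcfromtimestamp(float('1478134176.985864'))
print(dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC'))
# -> 2016-11-03 00:49:36.985864 UTC
# ----------------------------------------------------------------------------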
project_id = self.source.table_reference.projectId if not project_id:", "== RetryStrategy.RETRY_ALWAYS: return True elif strategy == RetryStrategy.RETRY_NEVER: return False", "None can be inferred because the ' 'table does not", "functions used both in sources and sinks (e.g., find and", "created_table def run_query(self, project_id, query, use_legacy_sql, flatten_results, dry_run=False): job_id =", "Output: 1.23 return float(value) elif field.type == 'TIMESTAMP': # The", "attributes: projectId, datasetId, and tableId. Or a string representing the", "The ASF licenses this file to You under the Apache", "to the table name is supported, e.g. 'DATASET.TABLE$YYYYmmdd'. dataset: The", "table_reference.projectId = project table_reference.datasetId = dataset table_reference.tableId = table return", "logging.info('Waiting on response from query: %s ...', query) time.sleep(1.0) continue", "but this is a known # issue in python 2.7", "delete # the table before this point. if write_disposition ==", "to # returning an empty list result[field.name] = [] else:", "def __init__(self, client=None): self.client = client or bigquery.BigqueryV2( http=get_new_http(), credentials=auth.get_service_credentials(),", "insert_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False): \"\"\"Inserts rows into the", "default to # returning an empty list result[field.name] = []", "querying. The wrapper is used to organize all the BigQuery", "a query source return self.client.get_query_location( self.executing_project, self.source.query, self.source.use_legacy_sql) def __enter__(self):", "if exn.status_code == 404: logging.warning('Table %s:%s.%s does not exist', project_id,", "field.type == 'TIMESTAMP': # The UTC should come from the", "projectId=project_id, dataset=dataset) response = self.client.datasets.Insert(request) # The response is a", ") response = self.client.jobs.Insert(request) return response.jobReference @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def", "index, field in enumerate(schema.fields): value = None if isinstance(schema, bigquery.TableSchema):", "reference = bigquery.JobReference(jobId=job_id, projectId=project_id) request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration(", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "%s\", request) response = self.client.jobs.Insert(request) logging.info(\"Response was %s\", response) return", "specified. if schema is None and found_table is None: raise", "ignored and the reference is returned as a result. Additionally,", "== 'DATETIME': # Input: \"2016-11-03T00:49:36\" --> Output: \"2016-11-03T00:49:36\" return value", "for the sink. self.project_id = self.sink.table_reference.projectId # If table schema", "elif strategy == RetryStrategy.RETRY_NEVER: return False elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR", "a schema. None can be inferred because the ' 'table", "self.table_id, errors)) def __enter__(self): self.client = BigQueryWrapper(client=self.test_bigquery_client) self.client.get_or_create_table( self.project_id, self.dataset_id,", "The dataset id owning the table. 
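# ----------------------------------------------------------------------------
# Illustrative aside: the query synthesized when the source is given as a
# table rather than as SQL, using hypothetical names. Legacy SQL brackets
# the fully qualified table name; under standard SQL the equivalent would
# use backticks (`project.dataset.table`), shown only for contrast.
project_id, dataset_id, table_id = 'my-project', 'my_dataset', 'my_table'
legacy_query = 'SELECT * FROM [%s:%s.%s];' % (project_id, dataset_id, table_id)
# -> SELECT * FROM [my-project:my_dataset.my_table];
# ----------------------------------------------------------------------------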
table_id: The table id.", "self.executing_project, self.source.query, self.source.use_legacy_sql) def __enter__(self): self.client = BigQueryWrapper(client=self.test_bigquery_client) self.client.create_temporary_dataset( self.executing_project,", "'utf8') self._unique_row_id = 0 # For testing scenarios where we", "we default to executing # project. project_id = self.source.table_reference.projectId if", "def run_query(self, project_id, query, use_legacy_sql, flatten_results, dry_run=False): job_id = self._start_query_job(project_id,", "semantics for WRITE_EMPTY and # WRITE_TRUNCATE write dispositions. if found_table:", "2 mins max so wait # that much time before", "--> Output: \"YmJi\" return value elif field.type == 'DATE': #", "projectId=project_id) request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( dryRun=True, query=bigquery.JobConfigurationQuery( query=query,", "if auth.is_running_in_gce: self.executing_project = auth.executing_project elif hasattr(source, 'pipeline_options'): self.executing_project =", "exist so we will create it as temporary ' 'with", "in which case dataset and project are ignored and the", "return self._insert_load_job( destination.projectId, job_id, destination, files, schema=schema, create_disposition=create_disposition, write_disposition=write_disposition) @retry.with_exponential_backoff(", "the state of the table and the create/write dispositions passed", "hasattr(sink, 'pipeline_options'): self.project_id = ( sink.pipeline_options.view_as(GoogleCloudOptions).project) assert self.project_id is not", "raise RuntimeError( 'Table %s:%s.%s not found but create disposition is", "the destination to an element, making it a KV pair.", "the BigQuery service to provide error handling for queries that", "( sink.pipeline_options.view_as(GoogleCloudOptions).project) assert self.project_id is not None self.dataset_id = self.sink.table_reference.datasetId", "= [] if not passed: raise RuntimeError('Could not successfully insert", "as a dict) to/from a JSON string. This is the", "location): dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix # Check if dataset", "only expected in tests logging.warning( \"Unable to get location, missing", "the delete and create.') # TODO(BEAM-2673): Remove this sleep by", "# (default is 10 seconds). Note that this is a", "row (represented as a dict) to/from a JSON string. This", "True elif strategy == RetryStrategy.RETRY_NEVER: return False elif (strategy ==", "and # limitations under the License. # \"\"\"Tools used by", "object from the bigquery API. The object has the following", "None: tr = self.source.table_reference return self.client.get_table_location( tr.projectId if tr.projectId is", "== 404: logging.warning('Table %s:%s.%s does not exist', project_id, dataset_id, table_id)", "reference.projectId = project_id request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job( configuration=bigquery.JobConfiguration( copy=bigquery.JobConfigurationTableCopy(", "default coder for sources and sinks if the coder argument", "work for additional information regarding copyright ownership. # The ASF", "table reference into a (project, dataset, table) tuple. Args: table:", "The last page is signalled by a missing pageToken. 
def get_hashable_destination(destination):
  """Parses a table reference into a (project, dataset, table) tuple.

  Args:
    destination: Either a TableReference object from the bigquery API.
      The object has the following attributes: projectId, datasetId, and
      tableId. Or a string representing the destination containing
      'PROJECT:DATASET.TABLE'.

  Returns:
    A string representing the destination containing 'PROJECT:DATASET.TABLE'.
  """
  if isinstance(destination, bigquery.TableReference):
    return '%s:%s.%s' % (
        destination.projectId, destination.datasetId, destination.tableId)
  else:
    return destination
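# Hypothetical usage sketch for get_hashable_destination; the project,
# dataset and table names below are made up for illustration.
def _example_get_hashable_destination():
  ref = bigquery.TableReference(
      projectId='my-project', datasetId='my_dataset', tableId='my_table')
  assert get_hashable_destination(ref) == 'my-project:my_dataset.my_table'
  # Strings pass through unchanged.
  assert get_hashable_destination('p:d.t') == 'p:d.t'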
def parse_table_schema_from_json(schema_string):
  """Parse the Table Schema provided as string.

  Args:
    schema_string: String serialized table schema, should be a valid JSON.

  Returns:
    A TableSchema of the BigQuery export from either the Query or the Table.
  """
  json_schema = json.loads(schema_string)

  def _parse_schema_field(field):
    """Parse a single schema field from dictionary.

    Args:
      field: Dictionary object containing serialized schema.

    Returns:
      A TableFieldSchema for a single column in BigQuery.
    """
    schema = bigquery.TableFieldSchema()
    schema.name = field['name']
    schema.type = field['type']
    if 'mode' in field:
      schema.mode = field['mode']
    else:
      schema.mode = 'NULLABLE'
    if 'description' in field:
      schema.description = field['description']
    if 'fields' in field:
      schema.fields = [_parse_schema_field(x) for x in field['fields']]
    return schema

  fields = [_parse_schema_field(f) for f in json_schema['fields']]
  return bigquery.TableSchema(fields=fields)
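# Illustrative sketch for parse_table_schema_from_json; the schema JSON is a
# hypothetical example, not a schema used by the module itself.
def _example_parse_table_schema_from_json():
  schema = parse_table_schema_from_json(
      '{"fields": ['
      '{"name": "word", "type": "STRING"},'
      '{"name": "count", "type": "INTEGER", "mode": "REQUIRED"}]}')
  assert schema.fields[0].mode == 'NULLABLE'  # mode defaults to NULLABLE
  assert schema.fields[1].type == 'INTEGER'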
def parse_table_reference(table, dataset=None, project=None):
  """Parses a table reference into a (project, dataset, table) tuple.

  Args:
    table: The ID of the table. The ID must contain only letters
      (a-z, A-Z), numbers (0-9), or underscores (_). If dataset argument is
      None then the table argument must contain the entire table reference:
      'DATASET.TABLE' or 'PROJECT:DATASET.TABLE'. This argument can be a
      bigquery.TableReference instance in which case dataset and project are
      ignored and the reference is returned as a result. Additionally, for
      date partitioned tables, appending '$YYYYmmdd' to the table name is
      supported, e.g. 'DATASET.TABLE$YYYYmmdd'.
    dataset: The ID of the dataset containing this table or null if the table
      reference is specified entirely by the table argument.
    project: The ID of the project containing this table or null if the table
      reference is specified entirely by the table (and possibly dataset)
      argument.

  Returns:
    A TableReference object from the bigquery API. The object has the
    following attributes: projectId, datasetId, and tableId.

  Raises:
    ValueError: if the table reference as a string does not match the
      expected format.
  """
  if isinstance(table, bigquery.TableReference):
    return table
  elif callable(table):
    return table
  elif isinstance(table, value_provider.ValueProvider):
    return table

  table_reference = bigquery.TableReference()
  # If dataset argument is not specified, the expectation is that the
  # table argument will contain a full table reference instead of just a
  # table name.
  if dataset is None:
    match = re.match(
        r'^((?P<project>.+):)?(?P<dataset>\w+)\.(?P<table>[\w\$]+)$', table)
    if not match:
      raise ValueError(
          'Expected a table reference (PROJECT:DATASET.TABLE or '
          'DATASET.TABLE) instead of %s.' % table)
    table_reference.projectId = match.group('project')
    table_reference.datasetId = match.group('dataset')
    table_reference.tableId = match.group('table')
  else:
    table_reference.projectId = project
    table_reference.datasetId = dataset
    table_reference.tableId = table
  return table_reference
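# Illustrative sketch for parse_table_reference; all names are hypothetical.
def _example_parse_table_reference():
  ref = parse_table_reference('my-project:my_dataset.my_table')
  assert (ref.projectId, ref.datasetId, ref.tableId) == (
      'my-project', 'my_dataset', 'my_table')
  # Without a project prefix, projectId is simply None; a '$YYYYmmdd'
  # partition decorator stays attached to the table id.
  ref = parse_table_reference('my_dataset.my_table$20181123')
  assert ref.projectId is None and ref.tableId == 'my_table$20181123'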
# -----------------------------------------------------------------------------
# BigQueryWrapper.


class BigQueryWrapper(object):
  """BigQuery client wrapper with utilities for querying.

  The wrapper is used to organize all the BigQuery integration points and
  offer a common place where retry logic for failures can be controlled.
  In addition it offers various functions used both in sources and sinks
  (e.g., find and create tables, query a table, etc.).
  """

  TEMP_TABLE = 'temp_table_'
  TEMP_DATASET = 'temp_dataset_'

  def __init__(self, client=None):
    self.client = client or bigquery.BigqueryV2(
        http=get_new_http(),
        credentials=auth.get_service_credentials(),
        response_encoding=None if sys.version_info[0] < 3 else 'utf8')
    self._unique_row_id = 0
    # For testing scenarios where we pass in a client we do not want a
    # randomized prefix for row IDs.
    self._row_id_prefix = '' if client else uuid.uuid4()
    self._temporary_table_suffix = uuid.uuid4().hex

  @property
  def unique_row_id(self):
    """Returns a unique row ID (str) used to avoid multiple insertions.

    If the row ID is provided, BigQuery will make a best effort to not
    insert the same row multiple times for fail and retry scenarios in which
    the insert request may be issued several times. This comes into play for
    sinks executed in a local runner.

    Returns:
      a unique row ID string
    """
    self._unique_row_id += 1
    return '%s_%d' % (self._row_id_prefix, self._unique_row_id)

  def _get_temp_table(self, project_id):
    return parse_table_reference(
        table=BigQueryWrapper.TEMP_TABLE + self._temporary_table_suffix,
        dataset=BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix,
        project=project_id)

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_query_location(self, project_id, query, use_legacy_sql):
    """Get the location of tables referenced in a query.

    This method returns the location of the first referenced table in the
    query and depends on the BigQuery service to provide error handling for
    queries that reference tables in multiple locations.
    """
    reference = bigquery.JobReference(jobId=uuid.uuid4().hex,
                                      projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                dryRun=True,
                query=bigquery.JobConfigurationQuery(
                    query=query,
                    useLegacySql=use_legacy_sql,
                )),
            jobReference=reference))

    response = self.client.jobs.Insert(request)

    if response.statistics is None:
      # This behavior is only expected in tests
      logging.warning(
          "Unable to get location, missing response.statistics. Query: %s",
          query)
      return None

    referenced_tables = response.statistics.query.referencedTables
    if referenced_tables:  # Guards against both non-empty and non-None
      table = referenced_tables[0]
      location = self.get_table_location(
          table.projectId, table.datasetId, table.tableId)
      logging.info("Using location %r from table %r referenced by query %s",
                   location, table, query)
      return location

    logging.debug("Query %s does not reference any tables.", query)
    return None

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _insert_copy_job(self,
                       project_id,
                       job_id,
                       from_table_reference,
                       to_table_reference,
                       create_disposition=None,
                       write_disposition=None):
    reference = bigquery.JobReference()
    reference.jobId = job_id
    reference.projectId = project_id
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                copy=bigquery.JobConfigurationTableCopy(
                    destinationTable=to_table_reference,
                    sourceTable=from_table_reference,
                    createDisposition=create_disposition,
                    writeDisposition=write_disposition,
                )
            ),
            jobReference=reference,
        )
    )

    logging.info("Inserting job request: %s", request)
    response = self.client.jobs.Insert(request)
    logging.info("Response was %s", response)
    return response.jobReference

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _insert_load_job(self,
                       project_id,
                       job_id,
                       table_reference,
                       source_uris,
                       schema=None,
                       write_disposition=None,
                       create_disposition=None):
    reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                load=bigquery.JobConfigurationLoad(
                    sourceUris=source_uris,
                    destinationTable=table_reference,
                    schema=schema,
                    writeDisposition=write_disposition,
                    createDisposition=create_disposition,
                    sourceFormat='NEWLINE_DELIMITED_JSON',
                    autodetect=schema is None,
                )
            ),
            jobReference=reference,
        )
    )
    response = self.client.jobs.Insert(request)
    return response.jobReference

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _start_query_job(self, project_id, query, use_legacy_sql,
                       flatten_results, job_id, dry_run=False):
    reference = bigquery.JobReference(jobId=job_id, projectId=project_id)
    request = bigquery.BigqueryJobsInsertRequest(
        projectId=project_id,
        job=bigquery.Job(
            configuration=bigquery.JobConfiguration(
                dryRun=dry_run,
                query=bigquery.JobConfigurationQuery(
                    query=query,
                    useLegacySql=use_legacy_sql,
                    allowLargeResults=True,
                    destinationTable=self._get_temp_table(project_id),
                    flattenResults=flatten_results)),
            jobReference=reference))

    response = self.client.jobs.Insert(request)
    return response.jobReference.jobId

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _get_query_results(self, project_id, job_id,
                         page_token=None, max_results=10000):
    request = bigquery.BigqueryJobsGetQueryResultsRequest(
        jobId=job_id, pageToken=page_token, projectId=project_id,
        maxResults=max_results)
    response = self.client.jobs.GetQueryResults(request)
    return response

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter)
  def _insert_all_rows(self, project_id, dataset_id, table_id, rows,
                       skip_invalid_rows=False):
    """Calls the insertAll BigQuery API endpoint.

    Docs for this BQ call: https://cloud.google.com/bigquery/docs/reference\
      /rest/v2/tabledata/insertAll."""
    # The rows argument is a list of
    # bigquery.TableDataInsertAllRequest.RowsValueListEntry instances as
    # required by the InsertAll() method.
    request = bigquery.BigqueryTabledataInsertAllRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id,
        tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest(
            skipInvalidRows=skip_invalid_rows,
            # TODO(silviuc): Should have an option for ignoreUnknownValues?
            rows=rows))
    response = self.client.tabledata.InsertAll(request)
    # response.insertErrors is not [] if errors encountered.
    return not response.insertErrors, response.insertErrors

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table(self, project_id, dataset_id, table_id):
    """Lookup a table's metadata object.

    Args:
      client: bigquery.BigqueryV2 instance
      project_id, dataset_id, table_id: table lookup parameters

    Returns:
      bigquery.Table instance
    Raises:
      HttpError if lookup failed.
    """
    request = bigquery.BigqueryTablesGetRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    response = self.client.tables.Get(request)
    return response

  def _create_table(self, project_id, dataset_id, table_id, schema):
    table = bigquery.Table(
        tableReference=bigquery.TableReference(
            projectId=project_id, datasetId=dataset_id, tableId=table_id),
        schema=schema)
    request = bigquery.BigqueryTablesInsertRequest(
        projectId=project_id, datasetId=dataset_id, table=table)
    response = self.client.tables.Insert(request)
    logging.debug("Created the table with id %s", table_id)
    # The response is a bigquery.Table instance.
    return response

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_dataset(self, project_id, dataset_id, location=None):
    # Check if dataset already exists otherwise create it
    try:
      dataset = self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      return dataset
    except HttpError as exn:
      if exn.status_code == 404:
        dataset_reference = bigquery.DatasetReference(
            projectId=project_id, datasetId=dataset_id)
        dataset = bigquery.Dataset(datasetReference=dataset_reference)
        if location is not None:
          dataset.location = location
        request = bigquery.BigqueryDatasetsInsertRequest(
            projectId=project_id, dataset=dataset)
        response = self.client.datasets.Insert(request)
        # The response is a bigquery.Dataset instance.
        return response
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _is_table_empty(self, project_id, dataset_id, table_id):
    request = bigquery.BigqueryTabledataListRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id,
        maxResults=1)
    response = self.client.tabledata.List(request)
    # The response is a bigquery.TableDataList instance.
    return response.totalRows == 0

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _delete_table(self, project_id, dataset_id, table_id):
    request = bigquery.BigqueryTablesDeleteRequest(
        projectId=project_id, datasetId=dataset_id, tableId=table_id)
    try:
      self.client.tables.Delete(request)
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Table %s:%s.%s does not exist', project_id,
                        dataset_id, table_id)
        return
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def _delete_dataset(self, project_id, dataset_id, delete_contents=True):
    request = bigquery.BigqueryDatasetsDeleteRequest(
        projectId=project_id, datasetId=dataset_id,
        deleteContents=delete_contents)
    try:
      self.client.datasets.Delete(request)
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Dataset %s:%s does not exist', project_id,
                        dataset_id)
        return
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_table_location(self, project_id, dataset_id, table_id):
    table = self.get_table(project_id, dataset_id, table_id)
    return table.location

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def create_temporary_dataset(self, project_id, location):
    dataset_id = BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix
    # Check if dataset exists to make sure that the temporary id is unique
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=dataset_id))
      if project_id is not None:
        # Unittests don't pass projectIds so they can be run without error
        raise RuntimeError(
            'Dataset %s:%s already exists so cannot be used as temporary.'
            % (project_id, dataset_id))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning(
            'Dataset %s:%s does not exist so we will create it as temporary '
            'with location=%s', project_id, dataset_id, location)
        self.get_or_create_dataset(project_id, dataset_id, location=location)
      else:
        raise

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def clean_up_temporary_dataset(self, project_id):
    temp_table = self._get_temp_table(project_id)
    try:
      self.client.datasets.Get(bigquery.BigqueryDatasetsGetRequest(
          projectId=project_id, datasetId=temp_table.datasetId))
    except HttpError as exn:
      if exn.status_code == 404:
        logging.warning('Dataset %s:%s does not exist', project_id,
                        temp_table.datasetId)
        return
      else:
        raise
    self._delete_dataset(temp_table.projectId, temp_table.datasetId, True)

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_job(self, project, job_id, location=None):
    request = bigquery.BigqueryJobsGetRequest()
    request.jobId = job_id
    request.projectId = project
    request.location = location

    return self.client.jobs.Get(request)

  def perform_load_job(self,
                       destination,
                       files,
                       job_id,
                       schema=None,
                       write_disposition=None,
                       create_disposition=None):
    """Starts a job to load data into BigQuery.

    Returns:
      bigquery.JobReference with the information about the job that was
      started.
    """
    return self._insert_load_job(
        destination.projectId, job_id, destination, files,
        schema=schema,
        create_disposition=create_disposition,
        write_disposition=write_disposition)

  @retry.with_exponential_backoff(
      num_retries=MAX_RETRIES,
      retry_filter=retry.retry_on_server_errors_and_timeout_filter)
  def get_or_create_table(
      self, project_id, dataset_id, table_id, schema,
      create_disposition, write_disposition):
    """Gets or creates a table based on create and write dispositions.

    The function mimics the behavior of BigQuery import jobs when using the
    same create and write dispositions.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      schema: A bigquery.TableSchema instance or None.
      create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
      write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.

    Returns:
      A bigquery.Table instance if table was found or created.

    Raises:
      RuntimeError: For various mismatches between the state of the table
        and the create/write dispositions passed in. For example if the
        table is not empty and WRITE_EMPTY was specified then an error will
        be raised since the table was expected to be empty.
    """
    from apache_beam.io.gcp.bigquery import BigQueryDisposition

    found_table = None
    try:
      found_table = self.get_table(project_id, dataset_id, table_id)
    except HttpError as exn:
      if exn.status_code == 404:
        if create_disposition == BigQueryDisposition.CREATE_NEVER:
          raise RuntimeError(
              'Table %s:%s.%s not found but create disposition is '
              'CREATE_NEVER.' % (project_id, dataset_id, table_id))
      else:
        raise

    # If table exists already then handle the semantics for WRITE_EMPTY and
    # WRITE_TRUNCATE write dispositions.
    if found_table:
      table_empty = self._is_table_empty(project_id, dataset_id, table_id)
      if (not table_empty and
          write_disposition == BigQueryDisposition.WRITE_EMPTY):
        raise RuntimeError(
            'Table %s:%s.%s is not empty but write disposition is '
            'WRITE_EMPTY.' % (project_id, dataset_id, table_id))
      # Delete the table and recreate it (later) if WRITE_TRUNCATE was
      # specified.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        self._delete_table(project_id, dataset_id, table_id)

    # Create a new table potentially reusing the schema from a previously
    # found table in case the schema was not specified.
    if schema is None and found_table is None:
      raise RuntimeError(
          'Table %s:%s.%s requires a schema. None can be inferred because '
          'the table does not exist.' % (project_id, dataset_id, table_id))
    if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
      return found_table
    else:
      created_table = self._create_table(project_id=project_id,
                                         dataset_id=dataset_id,
                                         table_id=table_id,
                                         schema=schema or found_table.schema)
      logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
                   project_id, dataset_id, table_id,
                   schema or found_table.schema, created_table)
      # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
      # the table before this point.
      if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
        # BigQuery can route data to the old table for 2 mins max so wait
        # that much time before creating the table and writing it
        logging.warning('Sleeping for 150 seconds before the write as ' +
                        'BigQuery inserts can be routed to deleted table ' +
                        'for 2 mins after the delete and create.')
        # TODO(BEAM-2673): Remove this sleep by migrating to load api
        time.sleep(150)
        return created_table
      else:
        return created_table

  def run_query(self, project_id, query, use_legacy_sql, flatten_results,
                dry_run=False):
    job_id = self._start_query_job(project_id, query, use_legacy_sql,
                                   flatten_results, job_id=uuid.uuid4().hex,
                                   dry_run=dry_run)
    if dry_run:
      # If this was a dry run then the fact that we get here means the
      # query has no errors. The start_query_job would raise an error
      # otherwise.
      return
    page_token = None
    while True:
      response = self._get_query_results(project_id, job_id, page_token)
      if not response.jobComplete:
        # The jobComplete field can be False if the query request times out
        # (default is 10 seconds). Note that this is a timeout for the query
        # request not for the actual execution of the query in the service.
        # If the request times out we keep trying. This situation is quite
        # possible if the query will return a large number of rows.
        logging.info('Waiting on response from query: %s ...', query)
        time.sleep(1.0)
        continue
      # We got some results. The last page is signalled by a missing
      # pageToken.
      yield response.rows, response.schema
      if not response.pageToken:
        break
      page_token = response.pageToken

  def insert_rows(self, project_id, dataset_id, table_id, rows,
                  skip_invalid_rows=False):
    """Inserts rows into the specified table.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      rows: A list of plain Python dictionaries. Each dictionary is a row
        and each key in it is the name of a field.
      skip_invalid_rows: If there are rows with insertion errors, whether
        they should be skipped, and all others should be inserted
        successfully.

    Returns:
      A tuple (bool, errors). If first element is False then the second
      element will be a bigquery.InsertErrorsValueListEntry instance
      containing specific errors.
    """

    # Prepare rows for insertion. Of special note is the row ID that we add
    # to each row in order to help BigQuery avoid inserting a row multiple
    # times. BigQuery will do a best-effort if unique IDs are provided. This
    # situation can happen during retries on failures.
    # TODO(silviuc): Must add support to writing TableRow's instead of dicts.
    final_rows = []
    for row in rows:
      json_object = bigquery.JsonObject()
      for k, v in iteritems(row):
        if isinstance(v, decimal.Decimal):
          # decimal values are converted into string because JSON does not
          # support the precision that decimal supports. BQ is able to
          # handle inserts into NUMERIC columns by receiving the values as
          # string attrs.
          v = str(v)
        json_object.additionalProperties.append(
            bigquery.JsonObject.AdditionalProperty(
                key=k, value=to_json_value(v)))
      final_rows.append(
          bigquery.TableDataInsertAllRequest.RowsValueListEntry(
              insertId=str(self.unique_row_id),
              json=json_object))
    result, errors = self._insert_all_rows(
        project_id, dataset_id, table_id, final_rows, skip_invalid_rows)
    return result, errors

  def _convert_cell_value_to_dict(self, value, field):
    if field.type == 'STRING':
      # Input: "XYZ" --> Output: "XYZ"
      return value
    elif field.type == 'BOOLEAN':
      # Input: "true" --> Output: True
      return value == 'true'
    elif field.type == 'INTEGER':
      # Input: "123" --> Output: 123
      return int(value)
    elif field.type == 'FLOAT':
      # Input: "1.23" --> Output: 1.23
      return float(value)
    elif field.type == 'TIMESTAMP':
      # The UTC should come from the timezone library but this is a known
      # issue in python 2.7 so we'll just hardcode it as we're reading using
      # utcfromtimestamp.
      # Input: 1478134176.985864 --> Output: "2016-11-03 00:49:36.985864 UTC"
      dt = datetime.datetime.utcfromtimestamp(float(value))
      return dt.strftime('%Y-%m-%d %H:%M:%S.%f UTC')
    elif field.type == 'BYTES':
      # Input: "YmJi" --> Output: "YmJi"
      return value
    elif field.type == 'DATE':
      # Input: "2016-11-03" --> Output: "2016-11-03"
      return value
    elif field.type == 'DATETIME':
      # Input: "2016-11-03T00:49:36" --> Output: "2016-11-03T00:49:36"
      return value
    elif field.type == 'TIME':
      # Input: "00:49:36" --> Output: "00:49:36"
      return value
    elif field.type == 'RECORD':
      # Note that a schema field object supports also a RECORD type. However
      # when querying, the repeated and/or record fields are flattened
      # unless we pass the flatten_results flag as False to the source
      return self.convert_row_to_dict(value, field)
    elif field.type == 'NUMERIC':
      return decimal.Decimal(value)
    elif field.type == 'GEOGRAPHY':
      return value
    else:
      raise RuntimeError('Unexpected field type: %s' % field.type)

  def convert_row_to_dict(self, row, schema):
    """Converts a TableRow instance using the schema to a Python dict."""
    result = {}
    for index, field in enumerate(schema.fields):
      value = None
      if isinstance(schema, bigquery.TableSchema):
        cell = row.f[index]
        value = from_json_value(cell.v) if cell.v is not None else None
      elif isinstance(schema, bigquery.TableFieldSchema):
        cell = row['f'][index]
        value = cell['v'] if 'v' in cell else None
      if field.mode == 'REPEATED':
        if value is None:
          # Ideally this should never happen as repeated fields default to
          # returning an empty list
          result[field.name] = []
        else:
          result[field.name] = [
              self._convert_cell_value_to_dict(x['v'], field) for x in value]
      elif value is None:
        if not field.mode == 'NULLABLE':
          raise ValueError('Received \'None\' as the value for the field %s '
                           'but the field is not NULLABLE.' % field.name)
        result[field.name] = None
      else:
        result[field.name] = self._convert_cell_value_to_dict(value, field)
    return result
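# Hedged end-to-end sketch of driving the wrapper directly. The project,
# dataset and table names are hypothetical; it assumes application default
# credentials, and passes the string forms of the dispositions.
def _example_bigquery_wrapper_usage():
  wrapper = BigQueryWrapper()
  wrapper.get_or_create_table(
      'my-project', 'my_dataset', 'my_table',
      parse_table_schema_from_json(
          '{"fields": [{"name": "word", "type": "STRING"}]}'),
      create_disposition='CREATE_IF_NEEDED',
      write_disposition='WRITE_APPEND')
  passed, errors = wrapper.insert_rows(
      'my-project', 'my_dataset', 'my_table', [{'word': 'abc'}])
  assert passed, errors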
# -----------------------------------------------------------------------------
# BigQueryReader, BigQueryWriter.


class BigQueryReader(dataflow_io.NativeSourceReader):
  """A reader for a BigQuery source."""

  def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True,
               flatten_results=True, kms_key=None):
    self.source = source
    self.test_bigquery_client = test_bigquery_client
    if auth.is_running_in_gce:
      self.executing_project = auth.executing_project
    elif hasattr(source, 'pipeline_options'):
      self.executing_project = (
          source.pipeline_options.view_as(GoogleCloudOptions).project)
    else:
      self.executing_project = None

    # TODO(silviuc): Try to automatically get it from gcloud config info.
    if not self.executing_project and test_bigquery_client is None:
      raise RuntimeError(
          'Missing executing project information. Please use the --project '
          'command line option to specify it.')
    self.row_as_dict = isinstance(self.source.coder, RowAsDictJsonCoder)
    # Schema for the rows being read by the reader. It is initialized the
    # first time something gets read from the table. It is not required
    # for reading the field values in each row but could be useful for
    # getting additional details.
    self.schema = None
    self.use_legacy_sql = use_legacy_sql
    self.flatten_results = flatten_results
    self.kms_key = kms_key

    if self.source.table_reference is not None:
      # If table schema did not define a project we default to executing
      # project.
      project_id = self.source.table_reference.projectId
      if not project_id:
        project_id = self.executing_project
      self.query = 'SELECT * FROM [%s:%s.%s];' % (
          project_id,
          self.source.table_reference.datasetId,
          self.source.table_reference.tableId)
    elif self.source.query is not None:
      self.query = self.source.query
    else:
      # Enforce the "modes" enforced by BigQuerySource.__init__.
      # If this exception has been raised, the BigQuerySource "modes" have
      # changed and this method will need to be updated as well.
      raise ValueError("BigQuerySource must have either a table or query")

  def _get_source_location(self):
    """
    Get the source location (e.g. ``"EU"`` or ``"US"``) from either

    - :data:`source.table_reference`
      or
    - The first referenced table in :data:`source.query`

    See Also:
      - :meth:`BigQueryWrapper.get_query_location`
      - :meth:`BigQueryWrapper.get_table_location`

    Returns:
      Optional[str]: The source location, if any.
    """
    if self.source.table_reference is not None:
      tr = self.source.table_reference
      return self.client.get_table_location(
          tr.projectId if tr.projectId is not None
          else self.executing_project,
          tr.datasetId, tr.tableId)
    else:  # It's a query source
      return self.client.get_query_location(
          self.executing_project,
          self.source.query,
          self.source.use_legacy_sql)

  def __enter__(self):
    self.client = BigQueryWrapper(client=self.test_bigquery_client)
    self.client.create_temporary_dataset(
        self.executing_project, location=self._get_source_location())
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self.client.clean_up_temporary_dataset(self.executing_project)

  def __iter__(self):
    for rows, schema in self.client.run_query(
        project_id=self.executing_project, query=self.query,
        use_legacy_sql=self.use_legacy_sql,
        flatten_results=self.flatten_results):
      if self.schema is None:
        self.schema = schema
      for row in rows:
        if self.row_as_dict:
          yield self.client.convert_row_to_dict(row, schema)
        else:
          yield row


class BigQueryWriter(dataflow_io.NativeSinkWriter):
  """The sink writer for a BigQuerySink."""

  def __init__(self, sink, test_bigquery_client=None, buffer_size=None):
    self.sink = sink
    self.test_bigquery_client = test_bigquery_client
    self.row_as_dict = isinstance(self.sink.coder, RowAsDictJsonCoder)
    # Buffer used to batch written rows so we reduce communication with the
    # BigQuery service.
    self.rows_buffer = []
    self.rows_buffer_flush_threshold = buffer_size or 1000
    # Figure out the project, dataset, and table used for the sink.
    self.project_id = self.sink.table_reference.projectId

    # If table schema did not define a project we default to executing
    # project.
    if self.project_id is None and hasattr(sink, 'pipeline_options'):
      self.project_id = (
          sink.pipeline_options.view_as(GoogleCloudOptions).project)

    assert self.project_id is not None

    self.dataset_id = self.sink.table_reference.datasetId
    self.table_id = self.sink.table_reference.tableId

  def _flush_rows_buffer(self):
    if self.rows_buffer:
      logging.info('Writing %d rows to %s:%s.%s table.',
                   len(self.rows_buffer), self.project_id, self.dataset_id,
                   self.table_id)
      passed, errors = self.client.insert_rows(
          project_id=self.project_id, dataset_id=self.dataset_id,
          table_id=self.table_id, rows=self.rows_buffer)
      self.rows_buffer = []
      if not passed:
        raise RuntimeError('Could not successfully insert rows to BigQuery'
                           ' table [%s:%s.%s]. Errors: %s' %
                           (self.project_id, self.dataset_id,
                            self.table_id, errors))

  def __enter__(self):
    self.client = BigQueryWrapper(client=self.test_bigquery_client)
    self.client.get_or_create_table(
        self.project_id, self.dataset_id, self.table_id,
        self.sink.table_schema, self.sink.create_disposition,
        self.sink.write_disposition)
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    self._flush_rows_buffer()

  def Write(self, row):
    self.rows_buffer.append(row)
    if len(self.rows_buffer) > self.rows_buffer_flush_threshold:
      self._flush_rows_buffer()
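# Sketch of the writer protocol. Runners normally drive this, and the sink
# argument here is hypothetical, so this only illustrates the
# __enter__/Write/__exit__ flow and the buffering behavior.
def _example_writer_flow(sink):
  with BigQueryWriter(sink, buffer_size=500) as writer:
    # Rows are buffered and flushed whenever the buffer exceeds
    # buffer_size; a final flush happens on __exit__.
    writer.Write({'word': 'abc'})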
class RowAsDictJsonCoder(coders.Coder):
  """A coder for a table row (represented as a dict) to/from a JSON string.

  This is the default coder for sources and sinks if the coder argument is
  not specified.
  """

  def encode(self, table_row):
    # The normal error when dumping NAN/INF values is:
    # ValueError: Out of range float values are not JSON compliant
    # This code will catch this error to emit an error that explains
    # to the programmer that they have used NAN/INF values.
    try:
      return json.dumps(
          table_row, allow_nan=False,
          default=default_encoder).encode('utf-8')
    except ValueError as e:
      raise ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR))

  def decode(self, encoded_table_row):
    return json.loads(encoded_table_row.decode('utf-8'))


class RetryStrategy(object):
  RETRY_ALWAYS = 'RETRY_ALWAYS'
  RETRY_NEVER = 'RETRY_NEVER'
  RETRY_ON_TRANSIENT_ERROR = 'RETRY_ON_TRANSIENT_ERROR'

  _NON_TRANSIENT_ERRORS = {'invalid', 'invalidQuery', 'notImplemented'}

  @staticmethod
  def should_retry(strategy, error_message):
    if strategy == RetryStrategy.RETRY_ALWAYS:
      return True
    elif strategy == RetryStrategy.RETRY_NEVER:
      return False
    elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and
          error_message not in RetryStrategy._NON_TRANSIENT_ERRORS):
      return True
    else:
      return False


class AppendDestinationsFn(DoFn):
  """Adds the destination to an element, making it a KV pair.

  Outputs a PCollection of KV-pairs where the key is a TableReference for
  the destination, and the value is the record itself.

  Experimental; no backwards compatibility guarantees.
  """

  def __init__(self, destination):
    self.destination = AppendDestinationsFn._get_table_fn(destination)

  @staticmethod
  def _value_provider_or_static_val(elm):
    if isinstance(elm, value_provider.ValueProvider):
      return elm
    else:
      # The type argument is a NoOp, because we assume the argument already
      # has the proper formatting.
      return value_provider.StaticValueProvider(lambda x: x, value=elm)

  @staticmethod
  def _get_table_fn(destination):
    if callable(destination):
      return destination
    else:
      return lambda x: AppendDestinationsFn._value_provider_or_static_val(
          destination).get()

  def process(self, element):
    yield (self.destination(element), element)
if schema", "f in json_schema['fields']] return bigquery.TableSchema(fields=fields) def parse_table_reference(table, dataset=None, project=None): \"\"\"Parses", "= self.client.jobs.Insert(request) logging.info(\"Response was %s\", response) return response.jobReference @retry.with_exponential_backoff( num_retries=MAX_RETRIES,", "table = self.get_table(project_id, dataset_id, table_id) return table.location @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "return self def __exit__(self, exception_type, exception_value, traceback): self._flush_rows_buffer() def Write(self,", "jobs when using the same create and write dispositions. Args:", "final_rows = [] for row in rows: json_object = bigquery.JsonObject()", "\"1.23\" --> Output: 1.23 return float(value) elif field.type == 'TIMESTAMP':", "being read by the reader. It is initialized the #", "404: logging.warning('Table %s:%s.%s does not exist', project_id, dataset_id, table_id) return", "the coder argument is not specified. \"\"\" def encode(self, table_row):", "sinks. Classes, constants and functions in this file are experimental", "if client else uuid.uuid4() self._temporary_table_suffix = uuid.uuid4().hex @property def unique_row_id(self):", "range float values are not JSON compliant # This code", "location is not None: dataset.location = location request = bigquery.BigqueryDatasetsInsertRequest(", "== 404: if create_disposition == BigQueryDisposition.CREATE_NEVER: raise RuntimeError( 'Table %s:%s.%s", "= field['mode'] else: schema.mode = 'NULLABLE' if 'description' in field:", "do a best-effort if unique IDs are provided. This situation", "and non-None table = referenced_tables[0] location = self.get_table_location( table.projectId, table.datasetId,", "(not table_empty and write_disposition == BigQueryDisposition.WRITE_EMPTY): raise RuntimeError( 'Table %s:%s.%s", "The jobComplete field can be False if the query request", "timeout for the query # request not for the actual", "def __init__(self, source, test_bigquery_client=None, use_legacy_sql=True, flatten_results=True, kms_key=None): self.source = source", "'BYTES': # Input: \"YmJi\" --> Output: \"YmJi\" return value elif", "1 return '%s_%d' % (self._row_id_prefix, self._unique_row_id) def _get_temp_table(self, project_id): return", "table_id): table = self.get_table(project_id, dataset_id, table_id) return table.location @retry.with_exponential_backoff( num_retries=MAX_RETRIES,", "self.get_table(project_id, dataset_id, table_id) return table.location @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def create_temporary_dataset(self,", "json_object.additionalProperties.append( bigquery.JsonObject.AdditionalProperty( key=k, value=to_json_value(v))) final_rows.append( bigquery.TableDataInsertAllRequest.RowsValueListEntry( insertId=str(self.unique_row_id), json=json_object)) result, errors", "reusing the schema from a previously # found table in", "self.client.get_or_create_table( self.project_id, self.dataset_id, self.table_id, self.sink.table_schema, self.sink.create_disposition, self.sink.write_disposition) return self def", "applicable law or agreed to in writing, software # distributed", "= self.sink.table_reference.tableId def _flush_rows_buffer(self): if self.rows_buffer: logging.info('Writing %d rows to", "raise RuntimeError( 'Table %s:%s.%s is not empty but write disposition", "writeDisposition=write_disposition, ) ), 
jobReference=reference, ) ) logging.info(\"Inserting job request: %s\",", "num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_timeout_or_quota_issues_filter) def _insert_all_rows(self, project_id, dataset_id, table_id, rows, skip_invalid_rows=False): \"\"\"Calls", "from future.utils import iteritems from apache_beam import coders from apache_beam.internal.gcp", "in multiple locations. \"\"\" reference = bigquery.JobReference(jobId=uuid.uuid4().hex, projectId=project_id) request =", "locations. \"\"\" reference = bigquery.JobReference(jobId=uuid.uuid4().hex, projectId=project_id) request = bigquery.BigqueryJobsInsertRequest( projectId=project_id,", "disable=wrong-import-order, wrong-import-position try: from apitools.base.py.exceptions import HttpError except ImportError: pass", "= self.client.tables.Get(request) return response def _create_table(self, project_id, dataset_id, table_id, schema):", "BigQueryReader(dataflow_io.NativeSourceReader): \"\"\"A reader for a BigQuery source.\"\"\" def __init__(self, source,", "table was found or created. Raises: RuntimeError: For various mismatches", "json_object = bigquery.JsonObject() for k, v in iteritems(row): if isinstance(v,", "self.sink.table_reference.projectId # If table schema did not define a project", "cannot be used as temporary.' % (project_id, dataset_id)) except HttpError", "a previously # found table in case the schema was", "the information about the job that was started. \"\"\" return", "self.source.query, self.source.use_legacy_sql) def __enter__(self): self.client = BigQueryWrapper(client=self.test_bigquery_client) self.client.create_temporary_dataset( self.executing_project, location=self._get_source_location())", "import to_json_value from apache_beam.internal.http_client import get_new_http from apache_beam.io.gcp.internal.clients import bigquery", "dry_run=False): reference = bigquery.JobReference(jobId=job_id, projectId=project_id) request = bigquery.BigqueryJobsInsertRequest( projectId=project_id, job=bigquery.Job(", "time.sleep(1.0) continue # We got some results. The last page", "RuntimeError: For various mismatches between the state of the table", "x: x, value=elm) @staticmethod def _get_table_fn(destination): if callable(destination): return destination", "Each dictionary is a row and each key in it", "instance. return response else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _is_table_empty(self,", "return bigquery.TableSchema(fields=fields) def parse_table_reference(table, dataset=None, project=None): \"\"\"Parses a table reference", "was specified then an error will be raised since the", "error to emit an error that explains # to the", "point. if write_disposition == BigQueryDisposition.WRITE_TRUNCATE: # BigQuery can route data", "if the coder argument is not specified. \"\"\" def encode(self,", "has # the proper formatting. return value_provider.StaticValueProvider(lambda x: x, value=elm)", "__init__(self, sink, test_bigquery_client=None, buffer_size=None): self.sink = sink self.test_bigquery_client = test_bigquery_client", "use_legacy_sql): \"\"\" Get the location of tables referenced in a", "= self.client.insert_rows( project_id=self.project_id, dataset_id=self.dataset_id, table_id=self.table_id, rows=self.rows_buffer) self.rows_buffer = [] if", "this file to You under the Apache License, Version 2.0", "a project we default to executing # project. project_id =", "the query in the service. 
If # the request times", "the --project ' 'command line option to specify it.') self.row_as_dict", "'pipeline_options'): self.executing_project = ( source.pipeline_options.view_as(GoogleCloudOptions).project) else: self.executing_project = None #", "'temp_dataset_' def __init__(self, client=None): self.client = client or bigquery.BigqueryV2( http=get_new_http(),", "and # WRITE_TRUNCATE write dispositions. if found_table: table_empty = self._is_table_empty(project_id,", "return False class AppendDestinationsFn(DoFn): \"\"\"Adds the destination to an element,", "to help BigQuery avoid inserting a row multiple times. #", "builtins import object from future.utils import iteritems from apache_beam import", "for WRITE_EMPTY and # WRITE_TRUNCATE write dispositions. if found_table: table_empty", "response def _create_table(self, project_id, dataset_id, table_id, schema): table = bigquery.Table(", "a query. This method returns the location of the first", "the row ID that we add to # each row", "have an option for ignoreUnknownValues? rows=rows)) response = self.client.tabledata.InsertAll(request) #", "decimal values are converted into string because JSON does not", "not response.insertErrors, response.insertErrors @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table(self, project_id, dataset_id,", "valid JSON. Returns: A TableSchema of the BigQuery export from", "%s' % field.type) def convert_row_to_dict(self, row, schema): \"\"\"Converts a TableRow", "agreements. See the NOTICE file distributed with # this work", "in json_schema['fields']] return bigquery.TableSchema(fields=fields) def parse_table_reference(table, dataset=None, project=None): \"\"\"Parses a", "that we get here means the # query has no", "if 'mode' in field: schema.mode = field['mode'] else: schema.mode =", "InsertAll() method. request = bigquery.BigqueryTabledataInsertAllRequest( projectId=project_id, datasetId=dataset_id, tableId=table_id, tableDataInsertAllRequest=bigquery.TableDataInsertAllRequest( skipInvalidRows=skip_invalid_rows,", "# inserts into NUMERIC columns by receiving JSON with string", "Ideally this should never happen as repeated fields default to", "'%s_%d' % (self._row_id_prefix, self._unique_row_id) def _get_temp_table(self, project_id): return parse_table_reference( table=BigQueryWrapper.TEMP_TABLE", "dispositions. if found_table: table_empty = self._is_table_empty(project_id, dataset_id, table_id) if (not", "dataset_id) return else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table_location(self, project_id,", "Returns: bigquery.Table instance Raises: HttpError if lookup failed. \"\"\" request", "errors. The start_query_job would raise an error otherwise. return page_token", "should never happen as repeated fields default to # returning", "sources and sinks if the coder argument is not specified.", "isinstance(self.source.coder, RowAsDictJsonCoder) # Schema for the rows being read by", "' 'table does not exist.' % (project_id, dataset_id, table_id)) if", "default to executing # project. project_id = self.source.table_reference.projectId if not", "dataset_id, table_id, schema, create_disposition, write_disposition): \"\"\"Gets or creates a table", "has the following attributes: projectId, datasetId, and tableId. 
Or a", "dataset exists to make sure that the temporary id is", "The function mimics the behavior of BigQuery import jobs when", "table exists already then handle the semantics for WRITE_EMPTY and", "for k, v in iteritems(row): if isinstance(v, decimal.Decimal): # decimal", "that decimal supports. BQ is able to handle # inserts", "== RetryStrategy.RETRY_NEVER: return False elif (strategy == RetryStrategy.RETRY_ON_TRANSIENT_ERROR and error_message", "get_job(self, project, job_id, location=None): request = bigquery.BigqueryJobsGetRequest() request.jobId = job_id", "or creates a table based on create and write dispositions.", "datasetId=dataset_id) dataset = bigquery.Dataset(datasetReference=dataset_reference) if location is not None: dataset.location", "else 'utf8') self._unique_row_id = 0 # For testing scenarios where", "BigQueryWrapper. class BigQueryWrapper(object): \"\"\"BigQuery client wrapper with utilities for querying.", "mins after the delete and create.') # TODO(BEAM-2673): Remove this", "the query and depends on the BigQuery service to provide", "project_id = self.executing_project self.query = 'SELECT * FROM [%s:%s.%s];' %", "def default_encoder(obj): if isinstance(obj, decimal.Decimal): return str(obj) raise TypeError( \"Object", "raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_table_location(self, project_id, dataset_id, table_id): table", "and write_disposition != BigQueryDisposition.WRITE_TRUNCATE: return found_table else: created_table = self._create_table(project_id=project_id,", "'TIMESTAMP': # The UTC should come from the timezone library", "not in RetryStrategy._NON_TRANSIENT_ERRORS): return True else: return False class AppendDestinationsFn(DoFn):", ") ), jobReference=reference, ) ) response = self.client.jobs.Insert(request) return response.jobReference", "# The UTC should come from the timezone library but", "ValueError('%s. %s' % (e, JSON_COMPLIANCE_ERROR)) def decode(self, encoded_table_row): return json.loads(encoded_table_row.decode('utf-8'))", "schema %s. Result: %s.', project_id, dataset_id, table_id, schema or found_table.schema,", "Remove this sleep by migrating to load api time.sleep(150) return", "read by the reader. It is initialized the # first", "return '%s:%s.%s' % ( destination.projectId, destination.datasetId, destination.tableId) else: return destination", "continue # We got some results. The last page is", "\"\"\" # Prepare rows for insertion. Of special note is", "from_json_value(cell.v) if cell.v is not None else None elif isinstance(schema,", "table before this point. if write_disposition == BigQueryDisposition.WRITE_TRUNCATE: # BigQuery", "self.executing_project, location=self._get_source_location()) return self def __exit__(self, exception_type, exception_value, traceback): self.client.clean_up_temporary_dataset(self.executing_project)", "schema: A bigquery.TableSchema instance or None. create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.", "as repeated fields default to # returning an empty list", "'DATASET.TABLE) instead of %s.' % table) table_reference.projectId = match.group('project') table_reference.datasetId", "this was a dry run then the fact that we", "Table Schema provided as string. 
Args: schema_string: String serialized table", "retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _get_query_results(self, project_id, job_id, page_token=None, max_results=10000): request = bigquery.BigqueryJobsGetQueryResultsRequest(", "\"\"\"Parse the Table Schema provided as string. Args: schema_string: String", "inserted successfully. Returns: A tuple (bool, errors). If first element", "You under the Apache License, Version 2.0 # (the \"License\");", "points and offer a common place where retry logic for", "location=None): request = bigquery.BigqueryJobsGetRequest() request.jobId = job_id request.projectId = project", "project, dataset, and table used for the sink. self.project_id =", "== 'NUMERIC': return decimal.Decimal(value) elif field.type == 'GEOGRAPHY': return value", "argument already has # the proper formatting. return value_provider.StaticValueProvider(lambda x:", "bigquery.Dataset instance. return response else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def", "field['mode'] else: schema.mode = 'NULLABLE' if 'description' in field: schema.description", "= self.sink.table_reference.datasetId self.table_id = self.sink.table_reference.tableId def _flush_rows_buffer(self): if self.rows_buffer: logging.info('Writing", "self.sink.table_schema, self.sink.create_disposition, self.sink.write_disposition) return self def __exit__(self, exception_type, exception_value, traceback):", "# The response is a bigquery.TableDataList instance. return response.totalRows ==", "kms_key=None): self.source = source self.test_bigquery_client = test_bigquery_client if auth.is_running_in_gce: self.executing_project", "fail and retry scenarios in which the insert request may", "_delete_dataset(self, project_id, dataset_id, delete_contents=True): request = bigquery.BigqueryDatasetsDeleteRequest( projectId=project_id, datasetId=dataset_id, deleteContents=delete_contents)", "the destination, and the value is the record itself. Experimental;", "with the information about the job that was started. \"\"\"", "for the destination, and the value is the record itself.", "the \"modes\" enforced by BigQuerySource.__init__. # If this exception has", "apache_beam.io.gcp.bigquery import BigQueryDisposition found_table = None try: found_table = self.get_table(project_id,", "except HttpError as exn: if exn.status_code == 404: logging.warning('Table %s:%s.%s", "Either a TableReference object from the bigquery API. The object", "dataset_id, location=location) else: raise @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def clean_up_temporary_dataset(self, project_id):", "are converted into string because JSON does not # support", "schema.mode = 'NULLABLE' if 'description' in field: schema.description = field['description']", "project. if self.project_id is None and hasattr(sink, 'pipeline_options'): self.project_id =", "etc.). \"\"\" TEMP_TABLE = 'temp_table_' TEMP_DATASET = 'temp_dataset_' def __init__(self,", "to make sure that the temporary id is unique try:", "table_id)) else: raise # If table exists already then handle", "we delete # the table before this point. if write_disposition", "sinks executed in a local runner. 
Returns: a unique row", "source_uris, schema=None, write_disposition=None, create_disposition=None): reference = bigquery.JobReference(jobId=job_id, projectId=project_id) request =", "inserts can be routed to deleted table ' + 'for", "of a field. skip_invalid_rows: If there are rows with insertion", "self.use_legacy_sql = use_legacy_sql self.flatten_results = flatten_results self.kms_key = kms_key if", "same create and write dispositions. Args: project_id: The project id", "(e, JSON_COMPLIANCE_ERROR)) def decode(self, encoded_table_row): return json.loads(encoded_table_row.decode('utf-8')) class RetryStrategy(object): RETRY_ALWAYS", "'STRING': # Input: \"XYZ\" --> Output: \"XYZ\" return value elif", "is not specified. \"\"\" def encode(self, table_row): # The normal", "def _parse_schema_field(field): \"\"\"Parse a single schema field from dictionary. Args:", "WRITE_TRUNCATE was # specified. if write_disposition == BigQueryDisposition.WRITE_TRUNCATE: self._delete_table(project_id, dataset_id,", "A tuple (bool, errors). If first element is False then", "various mismatches between the state of the table and the", "writing TableRow's instead of dicts. final_rows = [] for row", "receiving JSON with string attrs. v = str(v) json_object.additionalProperties.append( bigquery.JsonObject.AdditionalProperty(", "owning the table. table_id: The table id. rows: A list", "\"\"\"Adds the destination to an element, making it a KV", "enforced by BigQuerySource.__init__. # If this exception has been raised,", "%s.', project_id, dataset_id, table_id, schema or found_table.schema, created_table) # if", "response is a bigquery.Table instance. return response @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter)", "class RetryStrategy(object): RETRY_ALWAYS = 'RETRY_ALWAYS' RETRY_NEVER = 'RETRY_NEVER' RETRY_ON_TRANSIENT_ERROR =", "num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def _start_query_job(self, project_id, query, use_legacy_sql, flatten_results, job_id, dry_run=False):", "for date partitioned tables, appending '$YYYYmmdd' to the table name", "'REPEATED': if value is None: # Ideally this should never", "write disposition is WRITE_EMPTY.' % (project_id, dataset_id, table_id)) # Delete", "= self.source.table_reference.projectId if not project_id: project_id = self.executing_project self.query =", "response @retry.with_exponential_backoff( num_retries=MAX_RETRIES, retry_filter=retry.retry_on_server_errors_and_timeout_filter) def get_or_create_dataset(self, project_id, dataset_id, location=None): #", "self.executing_project = ( source.pipeline_options.view_as(GoogleCloudOptions).project) else: self.executing_project = None # TODO(silviuc):", "response.statistics. Query: %s\", query) return None referenced_tables = response.statistics.query.referencedTables if", "bigquery.BigqueryJobsGetRequest() request.jobId = job_id request.projectId = project request.location = location", "Create a new table potentially reusing the schema from a", "None else None elif isinstance(schema, bigquery.TableFieldSchema): cell = row['f'][index] value", "retry scenarios in which the insert request may be issued", "row in rows: if self.row_as_dict: yield self.client.convert_row_to_dict(row, schema) else: yield", "instance Raises: HttpError if lookup failed. \"\"\" request = bigquery.BigqueryTablesGetRequest(", "multiple times for fail and retry scenarios in which the", "bigquery.TableSchema instance or None. 
create_disposition: CREATE_NEVER or CREATE_IF_NEEDED. write_disposition: WRITE_APPEND,", "# found table in case the schema was not specified.", "--> Output: 1.23 return float(value) elif field.type == 'TIMESTAMP': #", "the table and writing it logging.warning('Sleeping for 150 seconds before", "JSON with string attrs. v = str(v) json_object.additionalProperties.append( bigquery.JsonObject.AdditionalProperty( key=k,", "BigQueryWrapper.TEMP_DATASET + self._temporary_table_suffix # Check if dataset exists to make", "License. You may obtain a copy of the License at", "with insertion errors, whether they should be skipped, and all", "row IDs. self._row_id_prefix = '' if client else uuid.uuid4() self._temporary_table_suffix", "was a dry run then the fact that we get", "one or more # contributor license agreements. See the NOTICE", "Dictionary object containing serialized schema. Returns: A TableFieldSchema for a", "query, use_legacy_sql, flatten_results, job_id, dry_run=False): reference = bigquery.JobReference(jobId=job_id, projectId=project_id) request", "guarantees. \"\"\" def __init__(self, destination): self.destination = AppendDestinationsFn._get_table_fn(destination) @staticmethod def", "BigQuery service. self.rows_buffer = [] self.rows_buffer_flush_threshold = buffer_size or 1000", "HAS BACKWARDS COMPATIBILITY GUARANTEES. \"\"\" from __future__ import absolute_import import", "import json import logging import re import sys import time", "This method returns the location of the first referenced table", "isinstance(elm, value_provider.ValueProvider): return elm else: # The type argument is", "is not JSON serializable\" % type(obj).__name__) def get_hashable_destination(destination): \"\"\"Parses a", "perform_load_job(self, destination, files, job_id, schema=None, write_disposition=None, create_disposition=None): \"\"\"Starts a job", "updated as well. raise ValueError(\"BigQuerySource must have either a table", "COMPATIBILITY GUARANTEES. \"\"\" from __future__ import absolute_import import datetime import", "'PROJECT:DATASET.TABLE'. \"\"\" if isinstance(destination, bigquery.TableReference): return '%s:%s.%s' % ( destination.projectId,", "See Also: - :meth:`BigQueryWrapper.get_query_location` - :meth:`BigQueryWrapper.get_table_location` Returns: Optional[str]: The source", "found_table.schema, created_table) # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete #", "query source return self.client.get_query_location( self.executing_project, self.source.query, self.source.use_legacy_sql) def __enter__(self): self.client" ]
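# --- Usage sketch (not part of the original module) --------------------------
# A minimal, hedged example of the two pure helpers defined above; it makes
# no BigQuery service calls, so it can run anywhere apache-beam[gcp] is
# installed. The table string 'my-project:logs.events$20190101' is a made-up
# illustration, not a real dataset.
if __name__ == '__main__':
  ref = parse_table_reference('my-project:logs.events$20190101')
  print(ref.projectId, ref.datasetId, ref.tableId)
  # -> my-project logs events$20190101 (the '$YYYYmmdd' partition suffix
  #    stays part of the tableId)

  coder = RowAsDictJsonCoder()
  row = {'user': 'alice', 'score': decimal.Decimal('1.5')}
  # default_encoder serializes the Decimal as the string '1.5', so the
  # round-trip yields {'user': u'alice', 'score': u'1.5'}.
  print(coder.decode(coder.encode(row)))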
[ "version of Qt libraries when there is installed another application", "there is installed another application (e.g. QtCreator) if is_win: from", "import extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt',", "Qt libraries when there is installed another application (e.g. QtCreator)", "#----------------------------------------------------------------------------- # Copyright (c) 2005-2017, PyInstaller Development Team. # #", "In the new consolidated mode any PyQt depends on _qt", "of the GNU General Public License with exception # for", "On Windows system PATH has to be extended to point", "any PyQt depends on _qt hiddenimports = ['sip', 'PyQt5.Qt'] #", "file. datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt',", "Windows system PATH has to be extended to point to", "to the PyQt5 directory. # The PySide directory contains Qt", "datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin'))", "to point to the PyQt5 directory. # The PySide directory", "getsitepackages, is_darwin, is_win # On Windows system PATH has to", "For Qt<5.4 to work on Mac OS X it is", "'QT_VERSION_STR') if is_module_satisfies('Qt < 5.4', qt_version): datas = [(qt_menu_nib_dir('PyQt5'), '')]", "# Copyright (c) 2005-2017, PyInstaller Development Team. # # Distributed", "collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')] # For Qt<5.4 to", "under the terms of the GNU General Public License with", "= ['sip', 'PyQt5.Qt'] # Collect just the qt.conf file. datas", "depends on _qt hiddenimports = ['sip', 'PyQt5.Qt'] # Collect just", "exception # for distributing bootloader. # # The full license", "need to avoid including different # version of Qt libraries", "2005-2017, PyInstaller Development Team. # # Distributed under the terms", "distributed with this software. #----------------------------------------------------------------------------- import os from PyInstaller.utils.hooks import", "library. qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt < 5.4', qt_version):", "getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) # In the new consolidated mode", "PyQt depends on _qt hiddenimports = ['sip', 'PyQt5.Qt'] # Collect", "get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from PyInstaller.compat import getsitepackages, is_darwin,", "General Public License with exception # for distributing bootloader. #", "from PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()])", "extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')])", "5.x shared library. qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt <", "or PySide # app. if is_darwin: # Version of the", "system PATH has to be extended to point to the", "get_module_file_attribute, collect_data_files) from PyInstaller.compat import getsitepackages, is_darwin, is_win # On", "( get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from PyInstaller.compat import getsitepackages,", "directory. 
# The PySide directory contains Qt dlls. We need", "# app. if is_darwin: # Version of the currently installed", "import getsitepackages, is_darwin, is_win # On Windows system PATH has", "Distributed under the terms of the GNU General Public License", "with this software. #----------------------------------------------------------------------------- import os from PyInstaller.utils.hooks import (", "extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) # In the new consolidated mode any", "PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')),", "hiddenimports = ['sip', 'PyQt5.Qt'] # Collect just the qt.conf file.", "x[0].endswith('qt.conf')] # For Qt<5.4 to work on Mac OS X", "on Mac OS X it is necessary to include `qt_menu.nib`.", "it is necessary to include `qt_menu.nib`. # This directory contains", "mode any PyQt depends on _qt hiddenimports = ['sip', 'PyQt5.Qt']", "PyInstaller Development Team. # # Distributed under the terms of", "some resource files necessary to run PyQt or PySide #", "extended to point to the PyQt5 directory. # The PySide", "Collect just the qt.conf file. datas = [x for x", "if is_darwin: # Version of the currently installed Qt 5.x", "is_win: from PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for x in", "(e.g. QtCreator) if is_win: from PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x, 'PyQt5')", "necessary to include `qt_menu.nib`. # This directory contains some resource", "new consolidated mode any PyQt depends on _qt hiddenimports =", "# version of Qt libraries when there is installed another", "import os from PyInstaller.utils.hooks import ( get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute,", "shared library. qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt < 5.4',", "is_win # On Windows system PATH has to be extended", "# # The full license is in the file COPYING.txt,", "with exception # for distributing bootloader. # # The full", "when there is installed another application (e.g. QtCreator) if is_win:", "Version of the currently installed Qt 5.x shared library. qt_version", "Development Team. # # Distributed under the terms of the", "# Collect just the qt.conf file. datas = [x for", "to run PyQt or PySide # app. if is_darwin: #", "installed Qt 5.x shared library. qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if", "qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt < 5.4', qt_version): datas", "consolidated mode any PyQt depends on _qt hiddenimports = ['sip',", "work on Mac OS X it is necessary to include", "Mac OS X it is necessary to include `qt_menu.nib`. #", "'Qt', 'bin')]) # In the new consolidated mode any PyQt", "avoid including different # version of Qt libraries when there", "PyInstaller.utils.hooks import ( get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from PyInstaller.compat", "qt.conf file. 
datas = [x for x in collect_data_files('PyQt5', False,", "for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')] #", "# In the new consolidated mode any PyQt depends on", "# The full license is in the file COPYING.txt, distributed", "This directory contains some resource files necessary to run PyQt", "'PyQt5') for x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) # In", "x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) # In the new", "import ( get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from PyInstaller.compat import", "# for distributing bootloader. # # The full license is", "point to the PyQt5 directory. # The PySide directory contains", "PyQt5 directory. # The PySide directory contains Qt dlls. We", "_qt hiddenimports = ['sip', 'PyQt5.Qt'] # Collect just the qt.conf", "os from PyInstaller.utils.hooks import ( get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files)", "to avoid including different # version of Qt libraries when", "directory contains Qt dlls. We need to avoid including different", "is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from PyInstaller.compat import getsitepackages, is_darwin, is_win", "this software. #----------------------------------------------------------------------------- import os from PyInstaller.utils.hooks import ( get_module_attribute,", "for distributing bootloader. # # The full license is in", "the PyQt5 directory. # The PySide directory contains Qt dlls.", "from PyInstaller.compat import getsitepackages, is_darwin, is_win # On Windows system", "another application (e.g. QtCreator) if is_win: from PyInstaller.utils.win32.winutils import extend_system_path", "the GNU General Public License with exception # for distributing", "The full license is in the file COPYING.txt, distributed with", "# On Windows system PATH has to be extended to", "application (e.g. QtCreator) if is_win: from PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x,", "# The PySide directory contains Qt dlls. We need to", "collect_data_files) from PyInstaller.compat import getsitepackages, is_darwin, is_win # On Windows", "be extended to point to the PyQt5 directory. # The", "(c) 2005-2017, PyInstaller Development Team. # # Distributed under the", "'PyQt5.Qt'] # Collect just the qt.conf file. datas = [x", "distributing bootloader. # # The full license is in the", "just the qt.conf file. datas = [x for x in", "directory contains some resource files necessary to run PyQt or", "of Qt libraries when there is installed another application (e.g.", "X it is necessary to include `qt_menu.nib`. # This directory", "PySide # app. if is_darwin: # Version of the currently", "resource files necessary to run PyQt or PySide # app.", "= get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt < 5.4', qt_version): datas =", "QtCreator) if is_win: from PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for", "is necessary to include `qt_menu.nib`. # This directory contains some", "the file COPYING.txt, distributed with this software. 
#----------------------------------------------------------------------------- import os", "contains Qt dlls. We need to avoid including different #", "the qt.conf file. datas = [x for x in collect_data_files('PyQt5',", "'bin')]) # In the new consolidated mode any PyQt depends", "the currently installed Qt 5.x shared library. qt_version = get_module_attribute('PyQt5.QtCore',", "file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import os from", "is_darwin: # Version of the currently installed Qt 5.x shared", "is_darwin, is_win # On Windows system PATH has to be", "on _qt hiddenimports = ['sip', 'PyQt5.Qt'] # Collect just the", "The PySide directory contains Qt dlls. We need to avoid", "if is_win: from PyInstaller.utils.win32.winutils import extend_system_path extend_system_path([os.path.join(x, 'PyQt5') for x", "to work on Mac OS X it is necessary to", "[x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')]", "for x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) # In the", "currently installed Qt 5.x shared library. qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR')", "# For Qt<5.4 to work on Mac OS X it", "dlls. We need to avoid including different # version of", "contains some resource files necessary to run PyQt or PySide", "qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from PyInstaller.compat import getsitepackages, is_darwin, is_win #", "different # version of Qt libraries when there is installed", "in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) # In the new consolidated", "bootloader. # # The full license is in the file", "to be extended to point to the PyQt5 directory. #", "# Version of the currently installed Qt 5.x shared library.", "terms of the GNU General Public License with exception #", "in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import", "Copyright (c) 2005-2017, PyInstaller Development Team. # # Distributed under", "of the currently installed Qt 5.x shared library. qt_version =", "necessary to run PyQt or PySide # app. if is_darwin:", "extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()]) extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')), 'Qt', 'bin')]) #", "PySide directory contains Qt dlls. We need to avoid including", "GNU General Public License with exception # for distributing bootloader.", "the terms of the GNU General Public License with exception", "license is in the file COPYING.txt, distributed with this software.", "# Distributed under the terms of the GNU General Public", "Public License with exception # for distributing bootloader. # #", "'bin')) if x[0].endswith('qt.conf')] # For Qt<5.4 to work on Mac", "files necessary to run PyQt or PySide # app. if", "is installed another application (e.g. QtCreator) if is_win: from PyInstaller.utils.win32.winutils", "include `qt_menu.nib`. # This directory contains some resource files necessary", "Qt 5.x shared library. qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt", "PyQt or PySide # app. if is_darwin: # Version of", "software. 
#----------------------------------------------------------------------------- import os from PyInstaller.utils.hooks import ( get_module_attribute, is_module_satisfies,", "is in the file COPYING.txt, distributed with this software. #-----------------------------------------------------------------------------", "# # Distributed under the terms of the GNU General", "x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')] # For", "if x[0].endswith('qt.conf')] # For Qt<5.4 to work on Mac OS", "Qt<5.4 to work on Mac OS X it is necessary", "We need to avoid including different # version of Qt", "libraries when there is installed another application (e.g. QtCreator) if", "os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')] # For Qt<5.4 to work on", "COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import os from PyInstaller.utils.hooks", "to include `qt_menu.nib`. # This directory contains some resource files", "has to be extended to point to the PyQt5 directory.", "`qt_menu.nib`. # This directory contains some resource files necessary to", "False, os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')] # For Qt<5.4 to work", "installed another application (e.g. QtCreator) if is_win: from PyInstaller.utils.win32.winutils import", "get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR') if is_module_satisfies('Qt < 5.4', qt_version): datas = [(qt_menu_nib_dir('PyQt5'),", "OS X it is necessary to include `qt_menu.nib`. # This", "Qt dlls. We need to avoid including different # version", "from PyInstaller.utils.hooks import ( get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute, collect_data_files) from", "#----------------------------------------------------------------------------- import os from PyInstaller.utils.hooks import ( get_module_attribute, is_module_satisfies, qt_menu_nib_dir,", "['sip', 'PyQt5.Qt'] # Collect just the qt.conf file. datas =", "including different # version of Qt libraries when there is", "run PyQt or PySide # app. if is_darwin: # Version", "License with exception # for distributing bootloader. # # The", "PyInstaller.compat import getsitepackages, is_darwin, is_win # On Windows system PATH", "# This directory contains some resource files necessary to run", "in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if x[0].endswith('qt.conf')] # For Qt<5.4", "app. if is_darwin: # Version of the currently installed Qt", "full license is in the file COPYING.txt, distributed with this", "= [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if", "Team. # # Distributed under the terms of the GNU", "PATH has to be extended to point to the PyQt5", "the new consolidated mode any PyQt depends on _qt hiddenimports" ]
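# --- Usage sketch (not part of the original hook) ----------------------------
# A hedged illustration of what the qt.conf filter above collects. Run it in
# the build environment with PyInstaller and PyQt5 installed; the printed
# source paths are machine-specific. collect_data_files returns
# (source_path, dest_dir) tuples, and the hook keeps only the entry whose
# source ends with 'qt.conf'.
if __name__ == '__main__':
    for src, dest in collect_data_files('PyQt5', False,
                                        os.path.join('Qt', 'bin')):
        if src.endswith('qt.conf'):
            print(src, '->', dest)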
[ "== 666 def test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node", "new_node = vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node, vy_ast.Int) def test_source(): old_node", "new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.value == 42 assert new_node.value", "42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node, vy_ast.Int) def test_source():", "test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.value", "as vy_ast def test_output_class(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node", "test_output_class(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666)", "old_node.value == 42 assert new_node.value == 666 def test_compare_nodes(): old_node", "old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert", "42 assert new_node.value == 666 def test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo", "= vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert new_node._parent", "old_node.node_source_code == new_node.node_source_code def test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node =", "42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert new_node._parent is None assert", "vy_ast.Int.from_node(old_node, value=666) assert new_node._parent is None assert new_node._depth == 0", "new_node.value == 666 def test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo = 42\")", "= vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert not", "vy_ast.Int.from_node(old_node, value=666) assert old_node.src == new_node.src assert old_node.node_source_code == new_node.node_source_code", "= vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node, vy_ast.Int) def test_source(): old_node =", "assert old_node.node_source_code == new_node.node_source_code def test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node", "assert isinstance(new_node, vy_ast.Int) def test_source(): old_node = vy_ast.parse_to_ast(\"foo = 42\")", "= vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.value == 42", "= vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.src", "= 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.src == new_node.src", "== 42 assert new_node.value == 666 def test_compare_nodes(): old_node =", "vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.value == 42 assert", "from vyper import ast as vy_ast def test_output_class(): old_node =", "old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.value ==", "= vy_ast.Int.from_node(old_node, value=666) assert old_node.value == 42 assert new_node.value ==", "vy_ast.Int.from_node(old_node, value=666) assert not vy_ast.compare_nodes(old_node, new_node) def test_new_node_has_no_parent(): old_node =", "== new_node.node_source_code def test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node,", "new_node = vy_ast.Int.from_node(old_node, value=666) assert not vy_ast.compare_nodes(old_node, new_node) def 
test_new_node_has_no_parent():", "vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node, vy_ast.Int)", "value=666) assert not vy_ast.compare_nodes(old_node, new_node) def test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo", "new_node) def test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node =", "= vy_ast.Int.from_node(old_node, value=666) assert old_node.src == new_node.src assert old_node.node_source_code ==", "vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.src ==", "vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert new_node._parent is", "vy_ast.Int.from_node(old_node, value=666) assert old_node.value == 42 assert new_node.value == 666", "vy_ast.compare_nodes(old_node, new_node) def test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node", "vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node, vy_ast.Int) def test_source(): old_node = vy_ast.parse_to_ast(\"foo", "value=666) assert old_node.value == 42 assert new_node.value == 666 def", "assert old_node.src == new_node.src assert old_node.node_source_code == new_node.node_source_code def test_kwargs():", "42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert not vy_ast.compare_nodes(old_node, new_node) def", "not vy_ast.compare_nodes(old_node, new_node) def test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo = 42\")", "value=666) assert old_node.src == new_node.src assert old_node.node_source_code == new_node.node_source_code def", "= vy_ast.Int.from_node(old_node, value=666) assert not vy_ast.compare_nodes(old_node, new_node) def test_new_node_has_no_parent(): old_node", "def test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node,", "assert not vy_ast.compare_nodes(old_node, new_node) def test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo =", "test_source(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666)", "= 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert new_node._parent is None", "= 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node, vy_ast.Int) def", "vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert not vy_ast.compare_nodes(old_node,", "new_node.node_source_code def test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node, value=666)", "== new_node.src assert old_node.node_source_code == new_node.node_source_code def test_kwargs(): old_node =", "assert old_node.value == 42 assert new_node.value == 666 def test_compare_nodes():", "def test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node,", "42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.src == new_node.src assert", "test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666)", "old_node.src == new_node.src assert old_node.node_source_code == new_node.node_source_code def test_kwargs(): old_node", "test_new_node_has_no_parent(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666)", "isinstance(new_node, vy_ast.Int) def test_source(): old_node = 
vy_ast.parse_to_ast(\"foo = 42\") new_node", "assert new_node.value == 666 def test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo =", "vy_ast def test_output_class(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node =", "def test_output_class(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node,", "vyper import ast as vy_ast def test_output_class(): old_node = vy_ast.parse_to_ast(\"foo", "vy_ast.Int) def test_source(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node =", "import ast as vy_ast def test_output_class(): old_node = vy_ast.parse_to_ast(\"foo =", "= vy_ast.Int.from_node(old_node, value=666) assert new_node._parent is None assert new_node._depth ==", "= 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert not vy_ast.compare_nodes(old_node, new_node)", "value=666) assert isinstance(new_node, vy_ast.Int) def test_source(): old_node = vy_ast.parse_to_ast(\"foo =", "666 def test_compare_nodes(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node =", "new_node = vy_ast.Int.from_node(old_node, value=666) assert new_node._parent is None assert new_node._depth", "= vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node, value=666) assert isinstance(new_node,", "def test_source(): old_node = vy_ast.parse_to_ast(\"foo = 42\") new_node = vy_ast.Int.from_node(old_node,", "def test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value new_node = vy_ast.Int.from_node(old_node, value=666) assert", "ast as vy_ast def test_output_class(): old_node = vy_ast.parse_to_ast(\"foo = 42\")", "new_node = vy_ast.Int.from_node(old_node, value=666) assert old_node.src == new_node.src assert old_node.node_source_code", "new_node.src assert old_node.node_source_code == new_node.node_source_code def test_kwargs(): old_node = vy_ast.parse_to_ast(\"42\").body[0].value" ]
[ "cd opencv/build && \\ cmake -D CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local", "WITH_GTK=ON \\ -D WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF", "libopencore-amrwb-dev \\ libtheora-dev \\ libvorbis-dev \\ libxvidcore-dev \\ x264 \\", "\\ cmake -D CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF", "def build(self): return r''' RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime &&", "import Python @dependency(Tools, Python, Boost) @source('git') @version('4.0.1') class Opencv(Module): def", "\\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security main\" && \\", "Module, dependency, source, version from .tools import Tools from .boost", "https://github.com/opencv/opencv_contrib.git opencv_contrib && \\ mkdir -p opencv/build && cd opencv/build", "libxvidcore-dev \\ x264 \\ v4l-utils \\ ffmpeg \\ && \\", "\\ protobuf-compiler \\ libopencv-dev \\ yasm \\ libjpeg-dev \\ libjasper-dev", "\\ liblmdb-dev \\ libprotobuf-dev \\ libsnappy-dev \\ protobuf-compiler \\ libopencv-dev", "-*- from .__module__ import Module, dependency, source, version from .tools", "mkdir -p opencv/build && cd opencv/build && \\ cmake -D", "\\ make -j\"$(nproc)\" install && \\ ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2", "libjpeg-dev \\ libjasper-dev \\ libavcodec-dev \\ libavformat-dev \\ libswscale-dev \\", "libtbb-dev \\ libqt4-dev \\ libgtk2.0-dev \\ libfaac-dev \\ libmp3lame-dev \\", "add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security main\" && \\ apt update &&", "\\ libvorbis-dev \\ libxvidcore-dev \\ x264 \\ v4l-utils \\ ffmpeg", "&& \\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security main\" &&", "\\ $APT_INSTALL \\ libatlas-base-dev \\ libgflags-dev \\ libgoogle-glog-dev \\ libhdf5-serial-dev", ".python import Python @dependency(Tools, Python, Boost) @source('git') @version('4.0.1') class Opencv(Module):", "\\ libgoogle-glog-dev \\ libhdf5-serial-dev \\ libleveldb-dev \\ liblmdb-dev \\ libprotobuf-dev", "WITH_IPP=OFF \\ -D WITH_CUDA=OFF \\ -D WITH_TBB=ON \\ -D WITH_V4L=ON", "opencv && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \\", "r''' RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\ DEBIAN_FRONTEND=noninteractive \\", "dependency, source, version from .tools import Tools from .boost import", "libopencv-dev \\ yasm \\ libjpeg-dev \\ libjasper-dev \\ libavcodec-dev \\", "yasm \\ libjpeg-dev \\ libjasper-dev \\ libavcodec-dev \\ libavformat-dev \\", "https://github.com/opencv/opencv opencv && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib &&", "\\ .. 
&& \\ make -j\"$(nproc)\" install && \\ ln", "libtheora-dev \\ libvorbis-dev \\ libxvidcore-dev \\ x264 \\ v4l-utils \\", "source, version from .tools import Tools from .boost import Boost", "WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON", "from .tools import Tools from .boost import Boost from .python", "{0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \\ mkdir -p opencv/build && cd", "-D WITH_QT=ON \\ -D WITH_OPENCL=ON \\ -D WITH_GTK=ON \\ -D", "\\ libopencore-amrwb-dev \\ libtheora-dev \\ libvorbis-dev \\ libxvidcore-dev \\ x264", ".__module__ import Module, dependency, source, version from .tools import Tools", "WITH_OPENCL=ON \\ -D WITH_GTK=ON \\ -D WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF", "&& \\ $APT_INSTALL \\ libatlas-base-dev \\ libgflags-dev \\ libgoogle-glog-dev \\", "import Tools from .boost import Boost from .python import Python", "-D BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON \\ -D", "opencv/build && \\ cmake -D CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local \\", "\\ libv4l-dev \\ libtbb-dev \\ libqt4-dev \\ libgtk2.0-dev \\ libfaac-dev", "@source('git') @version('4.0.1') class Opencv(Module): def build(self): return r''' RUN ln", "libv4l-dev \\ libtbb-dev \\ libqt4-dev \\ libgtk2.0-dev \\ libfaac-dev \\", "$GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \\ mkdir -p opencv/build", "\\ -D WITH_TBB=ON \\ -D WITH_V4L=ON \\ -D WITH_QT=ON \\", "\\ libsnappy-dev \\ protobuf-compiler \\ libopencv-dev \\ yasm \\ libjpeg-dev", "http://security.ubuntu.com/ubuntu xenial-security main\" && \\ apt update && \\ $APT_INSTALL", "OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. && \\ make -j\"$(nproc)\" install && \\", "/usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security", "WITH_CUDA=OFF \\ -D WITH_TBB=ON \\ -D WITH_V4L=ON \\ -D WITH_QT=ON", "\\ libjasper-dev \\ libavcodec-dev \\ libavformat-dev \\ libswscale-dev \\ libdc1394-22-dev", "libsnappy-dev \\ protobuf-compiler \\ libopencv-dev \\ yasm \\ libjpeg-dev \\", "-D WITH_V4L=ON \\ -D WITH_QT=ON \\ -D WITH_OPENCL=ON \\ -D", "\\ libopencore-amrnb-dev \\ libopencore-amrwb-dev \\ libtheora-dev \\ libvorbis-dev \\ libxvidcore-dev", "\\ libtheora-dev \\ libvorbis-dev \\ libxvidcore-dev \\ x264 \\ v4l-utils", "-D WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. 
&& \\ make", "import Module, dependency, source, version from .tools import Tools from", "{0} https://github.com/opencv/opencv opencv && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib", "\\ -D CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF \\ -D WITH_CUDA=OFF \\", "cmake -D CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF \\", "\\ libgtk2.0-dev \\ libfaac-dev \\ libmp3lame-dev \\ libopencore-amrnb-dev \\ libopencore-amrwb-dev", "libprotobuf-dev \\ libsnappy-dev \\ protobuf-compiler \\ libopencv-dev \\ yasm \\", "libfaac-dev \\ libmp3lame-dev \\ libopencore-amrnb-dev \\ libopencore-amrwb-dev \\ libtheora-dev \\", "opencv_contrib && \\ mkdir -p opencv/build && cd opencv/build &&", "utf-8 -*- from .__module__ import Module, dependency, source, version from", "from .python import Python @dependency(Tools, Python, Boost) @source('git') @version('4.0.1') class", "\\ -D WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF \\", "&& \\ cmake -D CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local \\ -D", "make -j\"$(nproc)\" install && \\ ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2 '''.format(self.version)", "\\ && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \\", "\\ -D BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON \\", "Tools from .boost import Boost from .python import Python @dependency(Tools,", "libswscale-dev \\ libdc1394-22-dev \\ libv4l-dev \\ libtbb-dev \\ libqt4-dev \\", "class Opencv(Module): def build(self): return r''' RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong", "\\ libtbb-dev \\ libqt4-dev \\ libgtk2.0-dev \\ libfaac-dev \\ libmp3lame-dev", "CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF \\ -D WITH_CUDA=OFF \\ -D WITH_TBB=ON", "\\ -D WITH_IPP=OFF \\ -D WITH_CUDA=OFF \\ -D WITH_TBB=ON \\", "\\ mkdir -p opencv/build && cd opencv/build && \\ cmake", ".tools import Tools from .boost import Boost from .python import", "Python @dependency(Tools, Python, Boost) @source('git') @version('4.0.1') class Opencv(Module): def build(self):", ".boost import Boost from .python import Python @dependency(Tools, Python, Boost)", "&& cd opencv/build && \\ cmake -D CMAKE_BUILD_TYPE=RELEASE \\ -D", "-fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu", "\\ -D BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\", "-D BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ ..", "Opencv(Module): def build(self): return r''' RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime", "\\ -D WITH_CUDA=OFF \\ -D WITH_TBB=ON \\ -D WITH_V4L=ON \\", "$GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \\ $GIT_CLONE --branch {0}", "\\ libswscale-dev \\ libdc1394-22-dev \\ libv4l-dev \\ libtbb-dev \\ libqt4-dev", "--branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \\ mkdir -p opencv/build &&", "\\ libfaac-dev \\ libmp3lame-dev \\ libopencore-amrnb-dev \\ libopencore-amrwb-dev \\ libtheora-dev", "&& \\ make -j\"$(nproc)\" install && \\ ln -s /usr/local/include/opencv4/opencv2", "\\ libopencv-dev \\ yasm \\ libjpeg-dev \\ libjasper-dev \\ libavcodec-dev", "BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. 
&&", "ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb", "@dependency(Tools, Python, Boost) @source('git') @version('4.0.1') class Opencv(Module): def build(self): return", "\\ x264 \\ v4l-utils \\ ffmpeg \\ && \\ $GIT_CLONE", "libgtk2.0-dev \\ libfaac-dev \\ libmp3lame-dev \\ libopencore-amrnb-dev \\ libopencore-amrwb-dev \\", "-p opencv/build && cd opencv/build && \\ cmake -D CMAKE_BUILD_TYPE=RELEASE", "-D WITH_GTK=ON \\ -D WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF \\ -D", "from .boost import Boost from .python import Python @dependency(Tools, Python,", "import Boost from .python import Python @dependency(Tools, Python, Boost) @source('git')", "\\ libmp3lame-dev \\ libopencore-amrnb-dev \\ libopencore-amrwb-dev \\ libtheora-dev \\ libvorbis-dev", "\\ -D WITH_V4L=ON \\ -D WITH_QT=ON \\ -D WITH_OPENCL=ON \\", "DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security main\" && \\ apt", "main\" && \\ apt update && \\ $APT_INSTALL \\ libatlas-base-dev", "libjasper-dev \\ libavcodec-dev \\ libavformat-dev \\ libswscale-dev \\ libdc1394-22-dev \\", "\\ libavformat-dev \\ libswscale-dev \\ libdc1394-22-dev \\ libv4l-dev \\ libtbb-dev", "libatlas-base-dev \\ libgflags-dev \\ libgoogle-glog-dev \\ libhdf5-serial-dev \\ libleveldb-dev \\", "-D WITH_TBB=ON \\ -D WITH_V4L=ON \\ -D WITH_QT=ON \\ -D", "libdc1394-22-dev \\ libv4l-dev \\ libtbb-dev \\ libqt4-dev \\ libgtk2.0-dev \\", "BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF \\ -D WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules", "\\ -D WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. && \\", "-D WITH_CUDA=OFF \\ -D WITH_TBB=ON \\ -D WITH_V4L=ON \\ -D", "\\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \\ mkdir -p", "Boost from .python import Python @dependency(Tools, Python, Boost) @source('git') @version('4.0.1')", "&& \\ mkdir -p opencv/build && cd opencv/build && \\", "\\ -D WITH_OPENCL=ON \\ -D WITH_GTK=ON \\ -D WITH_LIBV4L=ON \\", "xenial-security main\" && \\ apt update && \\ $APT_INSTALL \\", "\\ -D WITH_GTK=ON \\ -D WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF \\", "liblmdb-dev \\ libprotobuf-dev \\ libsnappy-dev \\ protobuf-compiler \\ libopencv-dev \\", "libgflags-dev \\ libgoogle-glog-dev \\ libhdf5-serial-dev \\ libleveldb-dev \\ liblmdb-dev \\", "opencv/build && cd opencv/build && \\ cmake -D CMAKE_BUILD_TYPE=RELEASE \\", "\\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. 
&& \\ make -j\"$(nproc)\" install", "libhdf5-serial-dev \\ libleveldb-dev \\ liblmdb-dev \\ libprotobuf-dev \\ libsnappy-dev \\", "@version('4.0.1') class Opencv(Module): def build(self): return r''' RUN ln -fs", "\\ libleveldb-dev \\ liblmdb-dev \\ libprotobuf-dev \\ libsnappy-dev \\ protobuf-compiler", "\\ libprotobuf-dev \\ libsnappy-dev \\ protobuf-compiler \\ libopencv-dev \\ yasm", "\\ libavcodec-dev \\ libavformat-dev \\ libswscale-dev \\ libdc1394-22-dev \\ libv4l-dev", "libmp3lame-dev \\ libopencore-amrnb-dev \\ libopencore-amrwb-dev \\ libtheora-dev \\ libvorbis-dev \\", "&& \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \\ $GIT_CLONE", "libleveldb-dev \\ liblmdb-dev \\ libprotobuf-dev \\ libsnappy-dev \\ protobuf-compiler \\", "Boost) @source('git') @version('4.0.1') class Opencv(Module): def build(self): return r''' RUN", "\\ ffmpeg \\ && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv", "WITH_TBB=ON \\ -D WITH_V4L=ON \\ -D WITH_QT=ON \\ -D WITH_OPENCL=ON", "libavformat-dev \\ libswscale-dev \\ libdc1394-22-dev \\ libv4l-dev \\ libtbb-dev \\", "-D WITH_LIBV4L=ON \\ -D BUILD_TESTS=OFF \\ -D BUILD_PERF_TESTS=OFF \\ -D", "-D CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF \\ -D", "WITH_QT=ON \\ -D WITH_OPENCL=ON \\ -D WITH_GTK=ON \\ -D WITH_LIBV4L=ON", "\\ apt update && \\ $APT_INSTALL \\ libatlas-base-dev \\ libgflags-dev", "build(self): return r''' RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\", "from .__module__ import Module, dependency, source, version from .tools import", "\\ v4l-utils \\ ffmpeg \\ && \\ $GIT_CLONE --branch {0}", "v4l-utils \\ ffmpeg \\ && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv", "\\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security main\" && \\ apt update", "apt update && \\ $APT_INSTALL \\ libatlas-base-dev \\ libgflags-dev \\", "-D WITH_IPP=OFF \\ -D WITH_CUDA=OFF \\ -D WITH_TBB=ON \\ -D", "$APT_INSTALL \\ libatlas-base-dev \\ libgflags-dev \\ libgoogle-glog-dev \\ libhdf5-serial-dev \\", "\\ -D WITH_QT=ON \\ -D WITH_OPENCL=ON \\ -D WITH_GTK=ON \\", "\"deb http://security.ubuntu.com/ubuntu xenial-security main\" && \\ apt update && \\", "libavcodec-dev \\ libavformat-dev \\ libswscale-dev \\ libdc1394-22-dev \\ libv4l-dev \\", "\\ libgflags-dev \\ libgoogle-glog-dev \\ libhdf5-serial-dev \\ libleveldb-dev \\ liblmdb-dev", "libopencore-amrnb-dev \\ libopencore-amrwb-dev \\ libtheora-dev \\ libvorbis-dev \\ libxvidcore-dev \\", "libvorbis-dev \\ libxvidcore-dev \\ x264 \\ v4l-utils \\ ffmpeg \\", "Python, Boost) @source('git') @version('4.0.1') class Opencv(Module): def build(self): return r'''", "-D WITH_OPENCL=ON \\ -D WITH_GTK=ON \\ -D WITH_LIBV4L=ON \\ -D", "\\ libhdf5-serial-dev \\ libleveldb-dev \\ liblmdb-dev \\ libprotobuf-dev \\ libsnappy-dev", "coding: utf-8 -*- from .__module__ import Module, dependency, source, version", "\\ libatlas-base-dev \\ libgflags-dev \\ libgoogle-glog-dev \\ libhdf5-serial-dev \\ libleveldb-dev", "libqt4-dev \\ libgtk2.0-dev \\ libfaac-dev \\ libmp3lame-dev \\ libopencore-amrnb-dev \\", "-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. 
&& \\ make -j\"$(nproc)\" install &&", "x264 \\ v4l-utils \\ ffmpeg \\ && \\ $GIT_CLONE --branch", "--branch {0} https://github.com/opencv/opencv opencv && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git", "ffmpeg \\ && \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv &&", "version from .tools import Tools from .boost import Boost from", "RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository", "\\ yasm \\ libjpeg-dev \\ libjasper-dev \\ libavcodec-dev \\ libavformat-dev", "WITH_FFMPEG=ON \\ -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \\ .. && \\ make -j\"$(nproc)\"", "# -*- coding: utf-8 -*- from .__module__ import Module, dependency,", "WITH_V4L=ON \\ -D WITH_QT=ON \\ -D WITH_OPENCL=ON \\ -D WITH_GTK=ON", "/etc/localtime && \\ DEBIAN_FRONTEND=noninteractive \\ add-apt-repository \"deb http://security.ubuntu.com/ubuntu xenial-security main\"", "\\ libqt4-dev \\ libgtk2.0-dev \\ libfaac-dev \\ libmp3lame-dev \\ libopencore-amrnb-dev", "\\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \\ $GIT_CLONE --branch", "protobuf-compiler \\ libopencv-dev \\ yasm \\ libjpeg-dev \\ libjasper-dev \\", ".. && \\ make -j\"$(nproc)\" install && \\ ln -s", "\\ libxvidcore-dev \\ x264 \\ v4l-utils \\ ffmpeg \\ &&", "CMAKE_BUILD_TYPE=RELEASE \\ -D CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF \\ -D WITH_CUDA=OFF", "-*- coding: utf-8 -*- from .__module__ import Module, dependency, source,", "libgoogle-glog-dev \\ libhdf5-serial-dev \\ libleveldb-dev \\ liblmdb-dev \\ libprotobuf-dev \\", "return r''' RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \\ DEBIAN_FRONTEND=noninteractive", "update && \\ $APT_INSTALL \\ libatlas-base-dev \\ libgflags-dev \\ libgoogle-glog-dev", "&& \\ apt update && \\ $APT_INSTALL \\ libatlas-base-dev \\", "-D CMAKE_INSTALL_PREFIX=/usr/local \\ -D WITH_IPP=OFF \\ -D WITH_CUDA=OFF \\ -D", "\\ libdc1394-22-dev \\ libv4l-dev \\ libtbb-dev \\ libqt4-dev \\ libgtk2.0-dev", "&& \\ $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \\ mkdir", "\\ libjpeg-dev \\ libjasper-dev \\ libavcodec-dev \\ libavformat-dev \\ libswscale-dev" ]
[ "iterations = 1000000000 while n<iterations: for s in range(0,len(program)): arguments", "16 stage = map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To save time,", "history.index(stage) complete_cycles = (iterations - n) / loop_length n +=", "elif action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s] == 'p':", "Normally, a counter marks the start of the stage and", "time, this function isn't used except at the end. Normally,", "function isn't used except at the end. Normally, a counter", "f: program = \",\".join(f.readlines()).split(\",\") n = 0 pos = 0", "[x[1:].strip().split(\"/\") for x in program] action_list = [x[0] for x", "= [x[0] for x in program] history = [] #", "at the end. Normally, a counter marks the start of", "the stage and this changes instead. \"\"\" global stage stage", "this changes instead. \"\"\" global stage stage = stage[amount:] +", "0 arguments_list = [x[1:].strip().split(\"/\") for x in program] action_list =", "= 1000000000 while n<iterations: for s in range(0,len(program)): arguments =", "= arguments_list[s] if action_list[s] == 's': pos += stage_length-int(arguments[0]) elif", "n += complete_cycles * loop_length history.append(copy.copy(stage)) n += 1 spin(pos", "open(sys.argv[1], 'rt') as f: program = \",\".join(f.readlines()).split(\",\") n = 0", "pos += stage_length-int(arguments[0]) elif action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif", "(iterations - n) / loop_length n += complete_cycles * loop_length", "at index %d matches at stage %d\"%(stage, history.index(stage), n)) loop_length", "= map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To save time, this function", "the end. Normally, a counter marks the start of the", "= n - history.index(stage) complete_cycles = (iterations - n) /", "n = 0 pos = 0 arguments_list = [x[1:].strip().split(\"/\") for", "action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s] == 'p': pos1", "stage_length = 16 stage = map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To", "arguments = arguments_list[s] if action_list[s] == 's': pos += stage_length-int(arguments[0])", "as f: program = \",\".join(f.readlines()).split(\",\") n = 0 pos =", "= 0 arguments_list = [x[1:].strip().split(\"/\") for x in program] action_list", "program] action_list = [x[0] for x in program] history =", "of the stage and this changes instead. \"\"\" global stage", "\"\"\"To save time, this function isn't used except at the", "isn't used except at the end. Normally, a counter marks", "used except at the end. Normally, a counter marks the", "spin(amount): \"\"\"To save time, this function isn't used except at", "'s': pos += stage_length-int(arguments[0]) elif action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length)", "n<iterations: for s in range(0,len(program)): arguments = arguments_list[s] if action_list[s]", "instead. 
\"\"\" global stage stage = stage[amount:] + stage[:amount] def", "history = [] # Change this to 1 for the", "x in program] action_list = [x[0] for x in program]", "stage.index(arguments[0]) pos2 = stage.index(arguments[1]) swap(pos1, pos2) if stage in history:", "in program] action_list = [x[0] for x in program] history", "# Change this to 1 for the solution to part", "loop_length n += complete_cycles * loop_length history.append(copy.copy(stage)) n += 1", "action_list = [x[0] for x in program] history = []", "stage[pos2]) = (stage[pos2], stage[pos1]) with open(sys.argv[1], 'rt') as f: program", "1. iterations = 1000000000 while n<iterations: for s in range(0,len(program)):", "end. Normally, a counter marks the start of the stage", "stage[amount:] + stage[:amount] def swap(pos1, pos2): global stage (stage[pos1], stage[pos2])", "print(\"Duplicate found: %r at index %d matches at stage %d\"%(stage,", "solution to part 1. iterations = 1000000000 while n<iterations: for", "\"\"\" global stage stage = stage[amount:] + stage[:amount] def swap(pos1,", "* loop_length history.append(copy.copy(stage)) n += 1 spin(pos % stage_length) print", "n) / loop_length n += complete_cycles * loop_length history.append(copy.copy(stage)) n", "loop_length history.append(copy.copy(stage)) n += 1 spin(pos % stage_length) print \"\".join(stage)", "/ loop_length n += complete_cycles * loop_length history.append(copy.copy(stage)) n +=", "stage in history: print(\"Duplicate found: %r at index %d matches", "stage.index(arguments[1]) swap(pos1, pos2) if stage in history: print(\"Duplicate found: %r", "program] history = [] # Change this to 1 for", "stage and this changes instead. \"\"\" global stage stage =", "except at the end. Normally, a counter marks the start", "in range(0,len(program)): arguments = arguments_list[s] if action_list[s] == 's': pos", "the start of the stage and this changes instead. 
\"\"\"", "%d matches at stage %d\"%(stage, history.index(stage), n)) loop_length = n", "= 0 pos = 0 arguments_list = [x[1:].strip().split(\"/\") for x", "for s in range(0,len(program)): arguments = arguments_list[s] if action_list[s] ==", "swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s] == 'p': pos1 = stage.index(arguments[0]) pos2", "(int(arguments[1])+pos)%stage_length) elif action_list[s] == 'p': pos1 = stage.index(arguments[0]) pos2 =", "def spin(amount): \"\"\"To save time, this function isn't used except", "marks the start of the stage and this changes instead.", "complete_cycles * loop_length history.append(copy.copy(stage)) n += 1 spin(pos % stage_length)", "swap(pos1, pos2): global stage (stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1]) with", "s in range(0,len(program)): arguments = arguments_list[s] if action_list[s] == 's':", "if action_list[s] == 's': pos += stage_length-int(arguments[0]) elif action_list[s] ==", "elif action_list[s] == 'p': pos1 = stage.index(arguments[0]) pos2 = stage.index(arguments[1])", "for x in program] history = [] # Change this", "stage %d\"%(stage, history.index(stage), n)) loop_length = n - history.index(stage) complete_cycles", "with open(sys.argv[1], 'rt') as f: program = \",\".join(f.readlines()).split(\",\") n =", "- n) / loop_length n += complete_cycles * loop_length history.append(copy.copy(stage))", "(stage[pos2], stage[pos1]) with open(sys.argv[1], 'rt') as f: program = \",\".join(f.readlines()).split(\",\")", "arguments_list[s] if action_list[s] == 's': pos += stage_length-int(arguments[0]) elif action_list[s]", "== 'p': pos1 = stage.index(arguments[0]) pos2 = stage.index(arguments[1]) swap(pos1, pos2)", "stage (stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1]) with open(sys.argv[1], 'rt') as", "'rt') as f: program = \",\".join(f.readlines()).split(\",\") n = 0 pos", "import sys import copy stage_length = 16 stage = map(chr,", "index %d matches at stage %d\"%(stage, history.index(stage), n)) loop_length =", "found: %r at index %d matches at stage %d\"%(stage, history.index(stage),", "pos1 = stage.index(arguments[0]) pos2 = stage.index(arguments[1]) swap(pos1, pos2) if stage", "0 pos = 0 arguments_list = [x[1:].strip().split(\"/\") for x in", "n - history.index(stage) complete_cycles = (iterations - n) / loop_length", "+= stage_length-int(arguments[0]) elif action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s]", "n)) loop_length = n - history.index(stage) complete_cycles = (iterations -", "sys import copy stage_length = 16 stage = map(chr, range(ord('a'),ord('a')+stage_length))", "swap(pos1, pos2) if stage in history: print(\"Duplicate found: %r at", "pos2): global stage (stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1]) with open(sys.argv[1],", "action_list[s] == 'p': pos1 = stage.index(arguments[0]) pos2 = stage.index(arguments[1]) swap(pos1,", "= stage.index(arguments[1]) swap(pos1, pos2) if stage in history: print(\"Duplicate found:", "= [] # Change this to 1 for the solution", "%r at index %d matches at stage %d\"%(stage, history.index(stage), n))", "Change this to 1 for the solution to part 1.", "changes instead. \"\"\" global stage stage = stage[amount:] + stage[:amount]", "stage stage = stage[amount:] + stage[:amount] def swap(pos1, pos2): global", "for the solution to part 1. 
iterations = 1000000000 while", "matches at stage %d\"%(stage, history.index(stage), n)) loop_length = n -", "this function isn't used except at the end. Normally, a", "while n<iterations: for s in range(0,len(program)): arguments = arguments_list[s] if", "= (iterations - n) / loop_length n += complete_cycles *", "= stage.index(arguments[0]) pos2 = stage.index(arguments[1]) swap(pos1, pos2) if stage in", "and this changes instead. \"\"\" global stage stage = stage[amount:]", "the solution to part 1. iterations = 1000000000 while n<iterations:", "== 's': pos += stage_length-int(arguments[0]) elif action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length,", "save time, this function isn't used except at the end.", "to part 1. iterations = 1000000000 while n<iterations: for s", "if stage in history: print(\"Duplicate found: %r at index %d", "'p': pos1 = stage.index(arguments[0]) pos2 = stage.index(arguments[1]) swap(pos1, pos2) if", "- history.index(stage) complete_cycles = (iterations - n) / loop_length n", "+ stage[:amount] def swap(pos1, pos2): global stage (stage[pos1], stage[pos2]) =", "+= complete_cycles * loop_length history.append(copy.copy(stage)) n += 1 spin(pos %", "1000000000 while n<iterations: for s in range(0,len(program)): arguments = arguments_list[s]", "program = \",\".join(f.readlines()).split(\",\") n = 0 pos = 0 arguments_list", "complete_cycles = (iterations - n) / loop_length n += complete_cycles", "in program] history = [] # Change this to 1", "to 1 for the solution to part 1. iterations =", "copy stage_length = 16 stage = map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount):", "stage[pos1]) with open(sys.argv[1], 'rt') as f: program = \",\".join(f.readlines()).split(\",\") n", "action_list[s] == 's': pos += stage_length-int(arguments[0]) elif action_list[s] == 'x':", "in history: print(\"Duplicate found: %r at index %d matches at", "part 1. iterations = 1000000000 while n<iterations: for s in", "== 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s] == 'p': pos1 =", "1 for the solution to part 1. iterations = 1000000000", "history: print(\"Duplicate found: %r at index %d matches at stage", "start of the stage and this changes instead. 
\"\"\" global", "import copy stage_length = 16 stage = map(chr, range(ord('a'),ord('a')+stage_length)) def", "'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s] == 'p': pos1 = stage.index(arguments[0])", "arguments_list = [x[1:].strip().split(\"/\") for x in program] action_list = [x[0]", "= [x[1:].strip().split(\"/\") for x in program] action_list = [x[0] for", "[] # Change this to 1 for the solution to", "map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To save time, this function isn't", "stage = map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To save time, this", "global stage (stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1]) with open(sys.argv[1], 'rt')", "= \",\".join(f.readlines()).split(\",\") n = 0 pos = 0 arguments_list =", "for x in program] action_list = [x[0] for x in", "(stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1]) with open(sys.argv[1], 'rt') as f:", "range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To save time, this function isn't used", "[x[0] for x in program] history = [] # Change", "= stage[amount:] + stage[:amount] def swap(pos1, pos2): global stage (stage[pos1],", "pos2) if stage in history: print(\"Duplicate found: %r at index", "def swap(pos1, pos2): global stage (stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1])", "this to 1 for the solution to part 1. iterations", "pos = 0 arguments_list = [x[1:].strip().split(\"/\") for x in program]", "\",\".join(f.readlines()).split(\",\") n = 0 pos = 0 arguments_list = [x[1:].strip().split(\"/\")", "a counter marks the start of the stage and this", "pos2 = stage.index(arguments[1]) swap(pos1, pos2) if stage in history: print(\"Duplicate", "at stage %d\"%(stage, history.index(stage), n)) loop_length = n - history.index(stage)", "history.index(stage), n)) loop_length = n - history.index(stage) complete_cycles = (iterations", "global stage stage = stage[amount:] + stage[:amount] def swap(pos1, pos2):", "stage = stage[amount:] + stage[:amount] def swap(pos1, pos2): global stage", "counter marks the start of the stage and this changes", "stage[:amount] def swap(pos1, pos2): global stage (stage[pos1], stage[pos2]) = (stage[pos2],", "x in program] history = [] # Change this to", "range(0,len(program)): arguments = arguments_list[s] if action_list[s] == 's': pos +=", "stage_length-int(arguments[0]) elif action_list[s] == 'x': swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length) elif action_list[s] ==", "%d\"%(stage, history.index(stage), n)) loop_length = n - history.index(stage) complete_cycles =", "= (stage[pos2], stage[pos1]) with open(sys.argv[1], 'rt') as f: program =", "= 16 stage = map(chr, range(ord('a'),ord('a')+stage_length)) def spin(amount): \"\"\"To save", "#!/usr/bin/python import sys import copy stage_length = 16 stage =", "loop_length = n - history.index(stage) complete_cycles = (iterations - n)" ]
[ "as np from numpy.testing import assert_equal, assert_array_equal from nose.tools import", "= felzenszwalb(img, sigma=0) # we expect 4 segments: assert_equal(len(np.unique(seg)), 4)", "i in range(4): hist = np.histogram(img[seg == i], bins=[0, 0.1,", "for i in range(4): hist = np.histogram(img[seg == i], bins=[0,", "segments: assert_equal(len(np.unique(seg)), 4) # that mostly respect the 4 regions:", "0.5, 1])[0] assert_greater(hist[i], 40) def test_color(): # very weak tests.", "= 1 img[10:, :10, 1] = 1 img[10:, 10:, 2]", "pretty unstable. img = np.zeros((20, 21)) img[:10, 10:] = 0.2", "0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i], 40) def test_color(): # very", "4) assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10, 10:], 1)", "we expect 4 segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:,", "img[:10, 10:] = 0.2 img[10:, :10] = 0.4 img[10:, 10:]", "0) assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:, 10:], 3)", "tests. This algorithm is pretty unstable. img = np.zeros((20, 21))", ":10, 1] = 1 img[10:, 10:, 2] = 1 seg", "# we expect 4 segments: assert_equal(len(np.unique(seg)), 4) # that mostly", "<reponame>jaberg/scikits-image import numpy as np from numpy.testing import assert_equal, assert_array_equal", "unstable. img = np.zeros((20, 21)) img[:10, 10:] = 0.2 img[10:,", "segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10,", "21)) img[:10, 10:] = 0.2 img[10:, :10] = 0.4 img[10:,", "expect 4 segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:, :10],", "= 1 img[10:, 10:, 2] = 1 seg = felzenszwalb(img,", "felzenszwalb def test_grey(): # very weak tests. This algorithm is", "= np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i],", "4) # that mostly respect the 4 regions: for i", "0.6 seg = felzenszwalb(img, sigma=0) # we expect 4 segments:", "== i], bins=[0, 0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i], 40) def", "= 1 seg = felzenszwalb(img, sigma=0) # we expect 4", "= 0.6 seg = felzenszwalb(img, sigma=0) # we expect 4", ":10], 0) assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:, 10:],", "is pretty unstable. img = np.zeros((20, 21, 3)) img[:10, :10,", "test_color(): # very weak tests. This algorithm is pretty unstable.", "10:], 1) assert_array_equal(seg[10:, 10:], 3) if __name__ == '__main__': from", "40) def test_color(): # very weak tests. This algorithm is", "0.3, 0.5, 1])[0] assert_greater(hist[i], 40) def test_color(): # very weak", "import assert_greater from skimage.segmentation import felzenszwalb def test_grey(): # very", "respect the 4 regions: for i in range(4): hist =", "np.zeros((20, 21)) img[:10, 10:] = 0.2 img[10:, :10] = 0.4", "4 segments: assert_equal(len(np.unique(seg)), 4) # that mostly respect the 4", "1])[0] assert_greater(hist[i], 40) def test_color(): # very weak tests. 
This", "assert_array_equal(seg[10:, 10:], 3) if __name__ == '__main__': from numpy import", "the 4 regions: for i in range(4): hist = np.histogram(img[seg", "1 img[10:, :10, 1] = 1 img[10:, 10:, 2] =", "np.zeros((20, 21, 3)) img[:10, :10, 0] = 1 img[10:, :10,", "sigma=0) # we expect 4 segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10],", "assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:,", "1 seg = felzenszwalb(img, sigma=0) # we expect 4 segments:", "10:], 3) if __name__ == '__main__': from numpy import testing", "4 segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:, :10], 2)", "we expect 4 segments: assert_equal(len(np.unique(seg)), 4) # that mostly respect", "very weak tests. This algorithm is pretty unstable. img =", "assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:, 10:], 3) if", "assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10], 0) assert_array_equal(seg[10:, :10], 2) assert_array_equal(seg[:10, 10:],", "1 img[10:, 10:, 2] = 1 seg = felzenszwalb(img, sigma=0)", "nose.tools import assert_greater from skimage.segmentation import felzenszwalb def test_grey(): #", "21, 3)) img[:10, :10, 0] = 1 img[10:, :10, 1]", ":10] = 0.4 img[10:, 10:] = 0.6 seg = felzenszwalb(img,", "img = np.zeros((20, 21, 3)) img[:10, :10, 0] = 1", "import assert_equal, assert_array_equal from nose.tools import assert_greater from skimage.segmentation import", "10:] = 0.6 seg = felzenszwalb(img, sigma=0) # we expect", "img[10:, 10:, 2] = 1 seg = felzenszwalb(img, sigma=0) #", "seg = felzenszwalb(img, sigma=0) # we expect 4 segments: assert_equal(len(np.unique(seg)),", "= np.zeros((20, 21)) img[:10, 10:] = 0.2 img[10:, :10] =", "img = np.zeros((20, 21)) img[:10, 10:] = 0.2 img[10:, :10]", "is pretty unstable. img = np.zeros((20, 21)) img[:10, 10:] =", "img[:10, :10, 0] = 1 img[10:, :10, 1] = 1", "unstable. img = np.zeros((20, 21, 3)) img[:10, :10, 0] =", "2] = 1 seg = felzenszwalb(img, sigma=0) # we expect", "expect 4 segments: assert_equal(len(np.unique(seg)), 4) # that mostly respect the", "sigma=0) # we expect 4 segments: assert_equal(len(np.unique(seg)), 4) # that", "that mostly respect the 4 regions: for i in range(4):", "assert_array_equal from nose.tools import assert_greater from skimage.segmentation import felzenszwalb def", "from nose.tools import assert_greater from skimage.segmentation import felzenszwalb def test_grey():", "0.2 img[10:, :10] = 0.4 img[10:, 10:] = 0.6 seg", "10:] = 0.2 img[10:, :10] = 0.4 img[10:, 10:] =", "assert_equal, assert_array_equal from nose.tools import assert_greater from skimage.segmentation import felzenszwalb", ":10], 2) assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:, 10:], 3) if __name__", "from skimage.segmentation import felzenszwalb def test_grey(): # very weak tests.", "img[10:, :10] = 0.4 img[10:, 10:] = 0.6 seg =", "algorithm is pretty unstable. img = np.zeros((20, 21, 3)) img[:10,", "in range(4): hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3,", "assert_greater(hist[i], 40) def test_color(): # very weak tests. This algorithm", "import felzenszwalb def test_grey(): # very weak tests. This algorithm", "skimage.segmentation import felzenszwalb def test_grey(): # very weak tests. 
This", "numpy as np from numpy.testing import assert_equal, assert_array_equal from nose.tools", "range(4): hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5,", "10:, 2] = 1 seg = felzenszwalb(img, sigma=0) # we", "# that mostly respect the 4 regions: for i in", "pretty unstable. img = np.zeros((20, 21, 3)) img[:10, :10, 0]", "bins=[0, 0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i], 40) def test_color(): #", "= np.zeros((20, 21, 3)) img[:10, :10, 0] = 1 img[10:,", "# very weak tests. This algorithm is pretty unstable. img", "import numpy as np from numpy.testing import assert_equal, assert_array_equal from", "np from numpy.testing import assert_equal, assert_array_equal from nose.tools import assert_greater", "2) assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:, 10:], 3) if __name__ ==", "img[10:, 10:] = 0.6 seg = felzenszwalb(img, sigma=0) # we", "This algorithm is pretty unstable. img = np.zeros((20, 21, 3))", "assert_greater from skimage.segmentation import felzenszwalb def test_grey(): # very weak", "numpy.testing import assert_equal, assert_array_equal from nose.tools import assert_greater from skimage.segmentation", "= 0.4 img[10:, 10:] = 0.6 seg = felzenszwalb(img, sigma=0)", "test_grey(): # very weak tests. This algorithm is pretty unstable.", "def test_color(): # very weak tests. This algorithm is pretty", "mostly respect the 4 regions: for i in range(4): hist", "1] = 1 img[10:, 10:, 2] = 1 seg =", "hist = np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0]", ":10, 0] = 1 img[10:, :10, 1] = 1 img[10:,", "assert_array_equal(seg[:10, 10:], 1) assert_array_equal(seg[10:, 10:], 3) if __name__ == '__main__':", "weak tests. This algorithm is pretty unstable. img = np.zeros((20,", "0.4 img[10:, 10:] = 0.6 seg = felzenszwalb(img, sigma=0) #", "from numpy.testing import assert_equal, assert_array_equal from nose.tools import assert_greater from", "This algorithm is pretty unstable. img = np.zeros((20, 21)) img[:10,", "3) if __name__ == '__main__': from numpy import testing testing.run_module_suite()", "1) assert_array_equal(seg[10:, 10:], 3) if __name__ == '__main__': from numpy", "regions: for i in range(4): hist = np.histogram(img[seg == i],", "np.histogram(img[seg == i], bins=[0, 0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i], 40)", "tests. This algorithm is pretty unstable. img = np.zeros((20, 21,", "= 0.2 img[10:, :10] = 0.4 img[10:, 10:] = 0.6", "4 regions: for i in range(4): hist = np.histogram(img[seg ==", "3)) img[:10, :10, 0] = 1 img[10:, :10, 1] =", "img[10:, :10, 1] = 1 img[10:, 10:, 2] = 1", "felzenszwalb(img, sigma=0) # we expect 4 segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10,", "def test_grey(): # very weak tests. This algorithm is pretty", "i], bins=[0, 0.1, 0.3, 0.5, 1])[0] assert_greater(hist[i], 40) def test_color():", "algorithm is pretty unstable. img = np.zeros((20, 21)) img[:10, 10:]", "0] = 1 img[10:, :10, 1] = 1 img[10:, 10:,", "felzenszwalb(img, sigma=0) # we expect 4 segments: assert_equal(len(np.unique(seg)), 4) #", "assert_equal(len(np.unique(seg)), 4) # that mostly respect the 4 regions: for", "# we expect 4 segments: assert_equal(len(np.unique(seg)), 4) assert_array_equal(seg[:10, :10], 0)" ]
[ "from masonite.app import App from masonite.middleware import CsrfMiddleware from masonite.testsuite.TestSuite", "generate_wsgi import pytest from masonite.exceptions import InvalidCSRFToken class TestCSRFMiddleware: def", "[] with pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.request.request_variables.update({'__token':", "'csrf_field' in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token' value='\") def test_middleware_throws_exception_on_post(self):", "test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field' in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token'", "import Request from masonite.view import View from masonite.auth.Csrf import Csrf", "assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token' value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST'", "Csrf(self.request), self.view) def test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field' in self.view.dictionary assert", "self.request) self.request = self.app.make('Request') self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view) def", "import pytest from masonite.exceptions import InvalidCSRFToken class TestCSRFMiddleware: def setup_method(self):", "CsrfMiddleware(self.request, Csrf(self.request), self.view) def test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field' in self.view.dictionary", "pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')}) self.middleware.exempt", "in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token' value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD']", "App() self.request = Request(generate_wsgi()) self.view = View(self.app) self.app.bind('Request', self.request) self.request", "self.middleware.before() assert 'csrf_field' in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token' value='\")", "masonite.middleware import CsrfMiddleware from masonite.testsuite.TestSuite import generate_wsgi import pytest from", "masonite.exceptions import InvalidCSRFToken class TestCSRFMiddleware: def setup_method(self): self.app = App()", "self.request = Request(generate_wsgi()) self.view = View(self.app) self.app.bind('Request', self.request) self.request =", "from masonite.auth.Csrf import Csrf from masonite.app import App from masonite.middleware", "with pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')})", "import generate_wsgi import pytest from masonite.exceptions import InvalidCSRFToken class TestCSRFMiddleware:", "App from masonite.middleware import CsrfMiddleware from masonite.testsuite.TestSuite import generate_wsgi import", "from masonite.testsuite.TestSuite import 
generate_wsgi import pytest from masonite.exceptions import InvalidCSRFToken", "import InvalidCSRFToken class TestCSRFMiddleware: def setup_method(self): self.app = App() self.request", "View from masonite.auth.Csrf import Csrf from masonite.app import App from", "from masonite.middleware import CsrfMiddleware from masonite.testsuite.TestSuite import generate_wsgi import pytest", "= Request(generate_wsgi()) self.view = View(self.app) self.app.bind('Request', self.request) self.request = self.app.make('Request')", "= App() self.request = Request(generate_wsgi()) self.view = View(self.app) self.app.bind('Request', self.request)", "name='__token' value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt = []", "CsrfMiddleware from masonite.testsuite.TestSuite import generate_wsgi import pytest from masonite.exceptions import", "def setup_method(self): self.app = App() self.request = Request(generate_wsgi()) self.view =", "self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view) def test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field'", "type='hidden' name='__token' value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt =", "InvalidCSRFToken class TestCSRFMiddleware: def setup_method(self): self.app = App() self.request =", "def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt = [] with pytest.raises(InvalidCSRFToken):", "self.app = App() self.request = Request(generate_wsgi()) self.view = View(self.app) self.app.bind('Request',", "masonite.auth.Csrf import Csrf from masonite.app import App from masonite.middleware import", "= CsrfMiddleware(self.request, Csrf(self.request), self.view) def test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field' in", "import CsrfMiddleware from masonite.testsuite.TestSuite import generate_wsgi import pytest from masonite.exceptions", "class TestCSRFMiddleware: def setup_method(self): self.app = App() self.request = Request(generate_wsgi())", "Request(generate_wsgi()) self.view = View(self.app) self.app.bind('Request', self.request) self.request = self.app.make('Request') self.middleware", "self.app.bind('Request', self.request) self.request = self.app.make('Request') self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view)", "<filename>tests/middleware/test_csrf_middleware.py from masonite.request import Request from masonite.view import View from", "self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt = [] with pytest.raises(InvalidCSRFToken): self.middleware.before() def", "from masonite.view import View from masonite.auth.Csrf import Csrf from masonite.app", "self.view = View(self.app) self.app.bind('Request', self.request) self.request = self.app.make('Request') self.middleware =", "'POST' self.middleware.exempt = [] with pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD']", "masonite.view import View from masonite.auth.Csrf import Csrf from masonite.app import", "import App from masonite.middleware import CsrfMiddleware from masonite.testsuite.TestSuite import generate_wsgi", "assert 'csrf_field' in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' 
name='__token' value='\") def", "= self.app.make('Request') self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view) def test_middleware_shares_correct_input(self): self.middleware.before()", "Csrf from masonite.app import App from masonite.middleware import CsrfMiddleware from", "import Csrf from masonite.app import App from masonite.middleware import CsrfMiddleware", "Request from masonite.view import View from masonite.auth.Csrf import Csrf from", "setup_method(self): self.app = App() self.request = Request(generate_wsgi()) self.view = View(self.app)", "= View(self.app) self.app.bind('Request', self.request) self.request = self.app.make('Request') self.middleware = CsrfMiddleware(self.request,", "def test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field' in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden'", "self.request = self.app.make('Request') self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view) def test_middleware_shares_correct_input(self):", "from masonite.exceptions import InvalidCSRFToken class TestCSRFMiddleware: def setup_method(self): self.app =", "self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')}) self.middleware.exempt =", "import View from masonite.auth.Csrf import Csrf from masonite.app import App", "= [] with pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST'", "TestCSRFMiddleware: def setup_method(self): self.app = App() self.request = Request(generate_wsgi()) self.view", "from masonite.request import Request from masonite.view import View from masonite.auth.Csrf", "self.view) def test_middleware_shares_correct_input(self): self.middleware.before() assert 'csrf_field' in self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input", "pytest from masonite.exceptions import InvalidCSRFToken class TestCSRFMiddleware: def setup_method(self): self.app", "masonite.request import Request from masonite.view import View from masonite.auth.Csrf import", "test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt = [] with pytest.raises(InvalidCSRFToken): self.middleware.before()", "value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt = [] with", "masonite.app import App from masonite.middleware import CsrfMiddleware from masonite.testsuite.TestSuite import", "self.view.dictionary assert self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token' value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] =", "self.middleware.exempt = [] with pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] =", "self.app.make('Request') self.middleware = CsrfMiddleware(self.request, Csrf(self.request), self.view) def test_middleware_shares_correct_input(self): self.middleware.before() assert", "test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST' 
self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')}) self.middleware.exempt = [] self.middleware.before()", "View(self.app) self.app.bind('Request', self.request) self.request = self.app.make('Request') self.middleware = CsrfMiddleware(self.request, Csrf(self.request),", "masonite.testsuite.TestSuite import generate_wsgi import pytest from masonite.exceptions import InvalidCSRFToken class", "self.view.dictionary['csrf_field'].startswith(\"<input type='hidden' name='__token' value='\") def test_middleware_throws_exception_on_post(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.middleware.exempt", "= 'POST' self.middleware.exempt = [] with pytest.raises(InvalidCSRFToken): self.middleware.before() def test_incoming_token_does_not_throw_exception_with_token(self):", "def test_incoming_token_does_not_throw_exception_with_token(self): self.request.environ['REQUEST_METHOD'] = 'POST' self.request.request_variables.update({'__token': self.request.get_cookie('csrf_token')}) self.middleware.exempt = []" ]
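
# A self-contained illustration of the pytest.raises pattern used in
# test_middleware_throws_exception_on_post (nothing masonite-specific; the
# stand-in exception and function below are hypothetical).
import pytest

class InvalidCSRFToken(Exception):
    """Stand-in for masonite.exceptions.InvalidCSRFToken."""

def before(token=None):
    if token is None:
        raise InvalidCSRFToken('Invalid CSRF token.')

def test_raises_without_token():
    with pytest.raises(InvalidCSRFToken):
        before()  # the test passes only if InvalidCSRFToken is raised
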
[ "as f: faq_page = f.read() return {'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako')", "with open(conventions_file, 'r') as file: conventions_page = file.read().replace('\\n', '') return", "faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file, 'r') as f: faq_page", "os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file, 'r') as f: faq_page = f.read()", "from pyramid.view import view_config import os @view_config(route_name='faq', renderer='faq.mako') def faq_view(request):", "os @view_config(route_name='faq', renderer='faq.mako') def faq_view(request): dir_path = os.path.dirname(__file__) faq_file =", "import view_config import os @view_config(route_name='faq', renderer='faq.mako') def faq_view(request): dir_path =", "faq_page = f.read() return {'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def conventions_view(request):", "os.path.join(dir_path, 'static/conventions.html') with open(conventions_file, 'r') as file: conventions_page = file.read().replace('\\n',", "def faq_view(request): dir_path = os.path.dirname(__file__) faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html') with", "dir_path = os.path.dirname(__file__) conventions_file = os.path.join(dir_path, 'static/conventions.html') with open(conventions_file, 'r')", "= os.path.dirname(__file__) faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file, 'r') as", "with open(faq_file, 'r') as f: faq_page = f.read() return {'content':", "f.read() return {'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def conventions_view(request): dir_path =", "import os @view_config(route_name='faq', renderer='faq.mako') def faq_view(request): dir_path = os.path.dirname(__file__) faq_file", "dir_path = os.path.dirname(__file__) faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file, 'r')", "os.path.dirname(__file__) conventions_file = os.path.join(dir_path, 'static/conventions.html') with open(conventions_file, 'r') as file:", "= os.path.dirname(__file__) conventions_file = os.path.join(dir_path, 'static/conventions.html') with open(conventions_file, 'r') as", "@view_config(route_name='conventions', renderer='conventions.mako') def conventions_view(request): dir_path = os.path.dirname(__file__) conventions_file = os.path.join(dir_path,", "'r') as file: conventions_page = file.read().replace('\\n', '') return {'content': conventions_page}", "pyramid.view import view_config import os @view_config(route_name='faq', renderer='faq.mako') def faq_view(request): dir_path", "'static/conventions.html') with open(conventions_file, 'r') as file: conventions_page = file.read().replace('\\n', '')", "'r') as f: faq_page = f.read() return {'content': faq_page} @view_config(route_name='conventions',", "{'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def conventions_view(request): dir_path = os.path.dirname(__file__) conventions_file", "renderer='conventions.mako') def conventions_view(request): dir_path = os.path.dirname(__file__) conventions_file = os.path.join(dir_path, 'static/conventions.html')", "conventions_view(request): dir_path = os.path.dirname(__file__) conventions_file = os.path.join(dir_path, 'static/conventions.html') with open(conventions_file,", "return {'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def 
conventions_view(request): dir_path = os.path.dirname(__file__)", "conventions_file = os.path.join(dir_path, 'static/conventions.html') with open(conventions_file, 'r') as file: conventions_page", "@view_config(route_name='faq', renderer='faq.mako') def faq_view(request): dir_path = os.path.dirname(__file__) faq_file = os.path.join(dir_path,", "def conventions_view(request): dir_path = os.path.dirname(__file__) conventions_file = os.path.join(dir_path, 'static/conventions.html') with", "= os.path.join(dir_path, 'static/conventions.html') with open(conventions_file, 'r') as file: conventions_page =", "renderer='faq.mako') def faq_view(request): dir_path = os.path.dirname(__file__) faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html')", "os.path.dirname(__file__) faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file, 'r') as f:", "'static/faq_with_indexes.html') with open(faq_file, 'r') as f: faq_page = f.read() return", "open(faq_file, 'r') as f: faq_page = f.read() return {'content': faq_page}", "<reponame>ltxom/phoible from pyramid.view import view_config import os @view_config(route_name='faq', renderer='faq.mako') def", "= os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file, 'r') as f: faq_page =", "faq_view(request): dir_path = os.path.dirname(__file__) faq_file = os.path.join(dir_path, 'static/faq_with_indexes.html') with open(faq_file,", "faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def conventions_view(request): dir_path = os.path.dirname(__file__) conventions_file =", "= f.read() return {'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def conventions_view(request): dir_path", "view_config import os @view_config(route_name='faq', renderer='faq.mako') def faq_view(request): dir_path = os.path.dirname(__file__)", "f: faq_page = f.read() return {'content': faq_page} @view_config(route_name='conventions', renderer='conventions.mako') def", "open(conventions_file, 'r') as file: conventions_page = file.read().replace('\\n', '') return {'content':" ]
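
# A hedged companion sketch: for the @view_config decorators above to take
# effect, matching routes must be registered on the Configurator at startup.
# The URL patterns here are assumptions, not taken from this repository.
from pyramid.config import Configurator

def make_app():
    config = Configurator()
    config.add_route('faq', '/faq')
    config.add_route('conventions', '/conventions')
    config.scan()  # picks up @view_config-decorated views in this package
    return config.make_wsgi_app()
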
[ "list ordered by \"id\" in ascending order \"\"\" RESTApiTestCase.process_test(self, 'computers',", "def test_computers_mixed3(self): \"\"\" url parameters: id, transport_type, orderby \"\"\" node_pk", "extras_filter with pagination ############# def test_node_extras_filter_pagination(self): \"\"\" Check that node", "'/computers?name=\"test1\"', expected_list_ids=[1]) def test_computers_filter_hostname(self): \"\"\" Add filter for the hostname", "= str(comp['uuid']) cls._dummy_data['computers'] = computers calculation_projections = ['id', 'uuid', 'user_id',", "computer list \"\"\" RESTApiTestCase.process_test( self, 'computers', '/computers?transport_type=\"local\"&name=\"test3\"&orderby=+id', expected_list_ids=[3] ) ###############", "list ordered by \"+id\" in ascending order \"\"\" RESTApiTestCase.process_test(self, 'computers',", "= client.get(url) response = json.loads(rv_response.data) if expected_errormsg: self.assertEqual(response['message'], expected_errormsg) else:", "the transport_type of computer and get the filtered computer list", "= json.loads(response_value.data) self.assertEqual(len(response['data']['nodes']), 1) self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1) self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1) self.assertEqual(len(response['data']['metadata']), 1)", "= client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])", "[{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}]) def test_calculation_retrieved_outputs(self): \"\"\" Get the list", "test_projectable_properties(self): \"\"\" test projectable_properties endpoint \"\"\" for nodetype in ['nodes',", "Get the list of given calculation retrieved_inputs \"\"\" node_uuid =", "CalcJobNode repository with tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_in) handle.flush() handle.seek(0) calc.put_object_from_filelike(handle,", "'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing' ] received_attr =", "specified in offset \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None]) def", "comp['uuid'] is not None: comp['uuid'] = str(comp['uuid']) cls._dummy_data['computers'] = computers", "comment.'])) def test_repo(self): \"\"\" Test to get repo list or", "puts them into class attributes \"\"\" # TODO: Storing the", "data :param result_name: result name in response e.g. incoming, outgoing", "the error message. 
\"\"\" expected_error = 'perpage key is incompatible", "in expected_keys: self.assertIn(prop, available_keys) # check order available_properties = response['data']['fields'].keys()", "json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}]) ############### calculation incoming #############", "response = json.loads(response_value.data) self.assertNotEqual(len(response['data']['nodes']), 0) for node in response['data']['nodes']: self.assertIn('attributes',", "url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell' with self.app.test_client() as client: rv_obj = client.get(url)", "the calcjob_outputs folder with the aiida.out file to the FolderData", "RESTApiTestCase.process_test( self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4] ) def test_computers_list_limit_only(self): \"\"\"", "computers list ordered by \"+id\" in ascending order \"\"\" RESTApiTestCase.process_test(self,", "the list of give calculation incoming \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid']", "full_list: expected_data = self._dummy_data[result_node_type] elif empty_list: expected_data = [] elif", "with self.app.test_client() as client: rv_obj = client.get(url) response = json.loads(rv_obj.data)['data']['comments']", "self.assertEqual(response['data']['nodes'][0]['attributes'], attributes) ############### calculation node extras_filter ############# def test_calculation_extras_filter(self): \"\"\"", "project=computer_projections).order_by({ 'comp': [{ 'id': { 'order': 'asc' } }] }).dict()", "'/computers?orderby=+id', full_list=True) def test_computers_list_limit_offset(self): \"\"\" Get the list of computers", "1} calcfunc = orm.CalcFunctionNode(computer=cls.computer) calcfunc.store() calc = orm.CalcJobNode(computer=cls.computer) calc.set_option('resources', resources)", "configure_api class RESTApiTestCase(AiidaTestCase): \"\"\" Setup of the tests for the", "further information please visit http://www.aiida.net # ########################################################################### # pylint: disable=too-many-lines", "the list of given calculation attributes filtered \"\"\" cell =", "Check that node extras specified in extras_filter are returned as", "uuid) self.assertEqual(response['query_string'], query_string) self.assertEqual(response['url'], f'http://localhost{url}') self.assertEqual(response['url_root'], 'http://localhost/') # node details", "# Prepare typical REST responses cls.process_dummy_data() def get_dummy_data(self): return self._dummy_data", "2, 4] ) def test_computers_orderby_mixed2(self): \"\"\" Returns the computers list", "import LOG_LEVEL_REPORT from aiida.common.timezone import now from aiida.orm import Log", "with self.app.test_client() as client: response = client.get(url) headers = response.headers", "self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2']) ############### structure node attributes filter ############# def test_structure_attributes_filter(self):", "is set \"\"\" expected_extras = ['extra1', 'extra2'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'", "def test_comments(self): \"\"\" Get the node comments \"\"\" node_uuid =", "2., 0.], [0., 0., 2.]] node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url =", "and url parameters \"\"\" parts = url.split('?') path = ''", "Please change this! 
computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type',", "path, query_string = self.split_path(url) self.assertEqual(response['method'], 'GET') self.assertEqual(response['resource_type'], node_type) self.assertEqual(response['path'], path)", "'/server/endpoints').data)['data'] self.assertTrue(len(data_base['available_endpoints']) > 0) self.assertDictEqual(data_base, data_server) def test_cors_headers(self): \"\"\" Test", "self.assertIn(dkay, response_keys) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response) def test_comments(self): \"\"\" Get", "self.assertNotIn('extras.extra2', node) self.assertEqual(len(node['extras']), len(expected_extras)) for extra in expected_extras: self.assertIn(extra, node['extras'])", ":param url: web url :param full_list: if url is requested", "= total no. of computers in database / perpage If", "= self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value", "calc.set_extra('extra1', False) calc.set_extra('extra2', 'extra_info') calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure') calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')", "in response['data']['nodes']: self.assertIn('attributes', node) self.assertNotIn('attributes.resources', node) self.assertNotIn('attributes.cell', node) self.assertEqual(len(node['attributes']), len(expected_attributes))", "class attributes \"\"\" # TODO: Storing the different nodes as", "for _ in computers] for comp in computers: if comp['uuid']", "self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error ) def test_complist_pagelimitoffset_perpage(self): \"\"\" If we", "self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4] ) def test_computers_mixed3(self):", "[0., 0., 2.]] node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell' with", "\"\"\" Returns the computers list ordered by \"+name\" in ascending", "api = configure_api(catch_internal_server=True) cls.app = api.app cls.app.config['TESTING'] = True #", "expected_attr: self.assertIn(attr, received_attr) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid) ############### calculation", "for dkay in expected_data_keys: self.assertIn(dkay, response_keys) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)", "orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({ 'comp': [{ 'id': { 'order': 'asc' }", "[] if result_node_type is None and result_name is None: result_node_type", "in descending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers',", "kpoint.store() resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1} calcfunc = orm.CalcFunctionNode(computer=cls.computer)", "= self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f'/nodes/{str(node_uuid)}/links/incoming?orderby=id', expected_list_ids=[5, 3], uuid=node_uuid, result_node_type='data', result_name='incoming'", "= json.loads(response_value.data) self.assertNotEqual(len(response['data']['nodes']), 0) for node in 
response['data']['nodes']: self.assertEqual(list(node['attributes'].keys()), expected_attribute)", "'volume' } ) self.assertEqual(response['data']['derived_properties']['formula'], 'Ba') RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)", "[] for calc in self.get_dummy_data()['calculations']: if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.': expected_node_uuids.append(calc['uuid'])", "'message']: self.assertIn(key, expected_log_keys) def test_download_formats(self): \"\"\" test for download format", "requested to get full list :param empty_list: if the response", "client.get(url) response = json.loads(rv_obj.data)['data']['comments'] all_comments = [] for comment in", "3, 2, 1] ) def test_computers_orderby_scheduler_type_asc(self): \"\"\" Returns the computers", "response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type':", "self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files' with self.app.test_client() as client: response_value =", "RESTApiTestCase.node_exception(self, \"/computers?aa=bb&id=2\", InputValidationError) \"\"\" ############### calculation retrieved_inputs and retrieved_outputs #############", "node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f'/nodes/{str(node_uuid)}/links/incoming?orderby=id', expected_list_ids=[5, 3], uuid=node_uuid, result_node_type='data',", "response, uuid=node_uuid) def test_structure_download(self): \"\"\" Test download of structure file", "= self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?id>={str(node_pk)}&transport_type=\\\"ssh\\\"&orderby=-id&limit=2\", expected_list_ids=[4, 2] ) ##########", ") def test_computers_list_page_default(self): \"\"\" it returns the no. 
of rows", "computers list first order by \"scheduler_type\" in ascending order and", "rv_obj = client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual(response['data']['attributes'], attributes)", "= client.get(url) response = json.loads(rv_obj.data)['data']['comments'] all_comments = [] for comment", "response['data']['nodes']: self.assertEqual(list(node['extras'].keys()), expected_extra) ############### node full_type filter ############# def test_nodes_full_type_filter(self):", "= orm.KpointsData() kpoint.set_kpoints_mesh([4, 4, 4]) kpoint.store() resources = {'num_machines': 1,", "the list of given calculation attributes filtered \"\"\" attributes =", "expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type'] response_keys = response['data'].keys()", "def test_calculation_attributes(self): \"\"\" Get list of calculation attributes \"\"\" attributes", "url parameters: id, limit and offset \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id']", "comments \"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments' with self.app.test_client()", "attribute is specified in attributes_filter only this attribute is returned", "if parts: path = parts[0] if len(parts) > 1: query_string", "result_node_type='data', result_name='incoming' ) def test_calculation_input_filters(self): \"\"\" Get filtered incoming list", "2) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2,", "extras['extra1']) self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2']) ############### structure node attributes filter ############# def", "RESTapi and puts them into class attributes \"\"\" # TODO:", "as client: response_value = client.get(url) response = json.loads(response_value.data) expected_keys =", "with self.app.test_client() as client: rv_obj = client.get(url) cif = load_node(node_uuid)._prepare_cif()[0]", "accessing them # by their list index is very fragile", "\"/computers?orderby=+scheduler_type, -hostname\", expected_list_ids=[1,0,4,3,2]) \"\"\" ############### list filter combinations ####################### def", "combinations ####################### def test_computers_mixed1(self): \"\"\" url parameters: id, limit and", "\"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]", "list of give calculation incoming \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] self.process_test(", "result_node_type=None, result_name=None ): # pylint: disable=too-many-arguments \"\"\" Check whether response", "computers list ordered by \"id\" in ascending order \"\"\" RESTApiTestCase.process_test(self,", "single computer \"\"\" node_uuid = self.get_dummy_data()['computers'][1]['uuid'] RESTApiTestCase.process_test( self, 'computers', f'/computers/{str(node_uuid)}',", "in database / perpage Using this formula it returns the", "############### list filter combinations ####################### def test_computers_filter_mixed1(self): \"\"\" Add filter", "the CalcJob node' # Add the calcjob_inputs folder with the", "= [] if result_node_type is None and result_name is None:", "computer and get the filtered computer list (e.g. 
id=1) \"\"\"", "client.get(url) response = json.loads(rv_obj.data) expected_data_keys = ['path', 'namespace', 'subspaces', 'label',", "node) self.assertNotIn('attributes.resources', node) self.assertNotIn('attributes.cell', node) self.assertEqual(len(node['attributes']), len(expected_attributes)) for attr in", "order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True) def test_computers_orderby_id_asc_sign(self): \"\"\" Returns", "name in response e.g. incoming, outgoing \"\"\" if expected_list_ids is", "scheduler_type, order it by \"hostname\" descending order Response:: test4 slurm", "prop in response['data']['ordering']: self.assertIn(prop, available_properties) def test_node_namespace(self): \"\"\" Test the", "perpage Using this formula it returns the no. of rows", "# node details and list with limit, offset, page, perpage", "\"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments' with self.app.test_client() as", "response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type':", "in response e.g. incoming, outgoing \"\"\" if expected_list_ids is None:", "########## pass unknown url parameter ########### def test_computers_unknown_param(self): \"\"\" url", "from aiida import orm from aiida.backends.testbase import AiidaTestCase from aiida.common", "'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0]) def test_computers_orderby_name_asc(self): \"\"\"", "def test_computers_list_page_default(self): \"\"\" it returns the no. of rows defined", "'content': 'test' }, } Log(**log_record) aiida_out = 'The output file\\nof", "parts[1] return path, query_string def compare_extra_response_data(self, node_type, url, response, uuid=None):", "client: rv_response = client.get(url) response = json.loads(rv_response.data) if expected_errormsg: self.assertEqual(response['message'],", "it would return the error message. 
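    # Note on the fixtures above (added for orientation): they form a small
    # provenance graph. The StructureData and the first Dict feed the stored
    # CalcJobNode through INPUT_CALC links, and the FolderData carrying
    # 'calcjob_outputs/aiida.out' hangs off that calculation via a CREATE
    # link, which is what the links/incoming, links/tree and repo endpoints
    # exercised further down actually traverse.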
\"\"\" expected_error = 'perpage", "\"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3,", "test1 pbspro localhost pbspro ========== Expected:: test1 pbspro localhost pbspro", "client: rv_obj = client.get(url) response = json.loads(rv_obj.data) for node in", "response_obj = client.get(url) input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb') self.assertEqual(response_obj.data, input_file) def", "ascending order and if it is having same transport_type, order", "Returns the computers list ordered by \"+id\" in ascending order", "self.assertEqual(response['url_root'], 'http://localhost/') # node details and list with limit, offset,", "\"\"\" Test to get repo list or repo file contents", "url requested fot the type of the node :param url:", "def test_base_url(self): \"\"\" Test that / returns list of endpoints", "as \"/page/1?perpage=default_value\" \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True) def test_computers_list_page_perpage(self): \"\"\"", "self.assertEqual(len(node['extras']), len(expected_extras)) for extra in expected_extras: self.assertIn(extra, node['extras']) ############### node", "response) expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type'] # check", "4]) kpoint.store() resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1} calcfunc =", "= orm.CalcFunctionNode(computer=cls.computer) calcfunc.store() calc = orm.CalcJobNode(computer=cls.computer) calc.set_option('resources', resources) calc.set_attribute('attr1', 'OK')", "with self.app.test_client() as client: response_obj = client.get(url) input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in',", "= ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type'] computers = orm.QueryBuilder().append(orm.Computer,", "acts as \"/page/1?perpage=default_value\" \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True) def test_computers_list_page_perpage(self):", "node_type: url requested fot the type of the node :param", "json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual(response['data']['attributes'], {'attr1': 'OK'}) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response,", "self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments' with self.app.test_client() as client: rv_obj =", "def test_calculation_inputs(self): \"\"\" Get the list of give calculation incoming", "to the database for different requests/filters/orderings etc. 
\"\"\" super().setUpClass() api", "3, 4] ) def test_computers_orderby_name_asc_sign(self): \"\"\" Returns the computers list", "if it is having same scheduler_type, order it by \"name\"", "'DIRECTORY'}]) ############### calculation incoming ############# def test_calculation_inputs(self): \"\"\" Get the", "calc.id, 'message': 'This is a template record message', 'metadata': {", "'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1]) def test_computers_filter_id2(self): \"\"\" Add filter on the", "another comment.'])) def test_repo(self): \"\"\" Test to get repo list", "attributes ############# def test_calculation_attributes(self): \"\"\" Get list of calculation attributes", "in response['data']['nodes']: self.assertEqual(list(node['attributes'].keys()), expected_attribute) ############### node extras_filter with pagination #############", "ordered by \"id\" in descending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id',", "self.assertEqual(list(node['attributes'].keys()), expected_attribute) ############### node extras_filter with pagination ############# def test_node_extras_filter_pagination(self):", "a dictionary when pagination is set \"\"\" expected_extra = ['extra2']", "link_label='link_structure') calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter') aiida_in = 'The input file\\nof the", "limit parameter. It should return the no of rows specified", "calc1.set_option('resources', resources) calc1.store() dummy_computers = [{ 'label': 'test1', 'hostname': 'test1.epfl.ch',", "fragile and a pain to debug. # Please change this!", "'loggername', 'levelname', 'dbnode_id', 'message']: self.assertIn(key, expected_log_keys) def test_download_formats(self): \"\"\" test", "prop in expected_keys: self.assertIn(prop, available_keys) # check order available_properties =", "the list of computers from database using offset parameter It", "AiiDA RESTful-api \"\"\" _url_prefix = '/api/v4' _dummy_data = {} _PERPAGE_DEFAULT", "computer list (e.g. 
id > 2) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id']", "dataclass in data_types.items(): data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({ 'data': [{", "= json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual(response['data']['attributes'], {'attr1': 'OK'}) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url,", "str(calc['uuid']) cls._dummy_data['calculations'] = calculations data_projections = ['id', 'uuid', 'user_id', 'node_type']", "############### computers endpoint ######################## def test_computers_details(self): \"\"\" Requests the details", "= json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}]) def test_calculation_retrieved_outputs(self): \"\"\"", "= '/api/v4' _dummy_data = {} _PERPAGE_DEFAULT = 20 _LIMIT_DEFAULT =", ":param uuid: url requested for the node pk \"\"\" path,", "client: rv_obj = client.get(url) response = json.loads(rv_obj.data) expected_data_keys = ['path',", "= json.loads(rv_response.data) if expected_errormsg: self.assertEqual(response['message'], expected_errormsg) else: if full_list: expected_data", "pagination ############# def test_node_extras_filter_pagination(self): \"\"\" Check that node extras specified", "\"name\" in ascending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self,", "= json.loads(client.get(self.get_url_prefix() + '/').data)['data'] data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data'] self.assertTrue(len(data_base['available_endpoints'])", "the list of given calculation retrieved_outputs \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid']", "TODO: Storing the different nodes as lists and accessing them", "= f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value = client.get(url) response", "def test_structure_derived_properties(self): \"\"\" Get the list of give calculation incoming", "from data :param expected_range: [start, stop] range of expected ids", "expected_list_ids=[1] ) def test_computers_filter_mixed2(self): \"\"\" Add filter for the id,", "= client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])", "with tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_out) handle.flush() handle.seek(0) retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True)", "response list is empty :param expected_list_ids: list of expected ids", "= load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access self.assertEqual(rv_obj.data, structure_data) def test_cif(self): \"\"\"", "details and list with limit, offset, page, perpage def process_test(", "self.assertEqual(len(response['data']['metadata']), 1) expected_attr = [ 'ctime', 'mtime', 'id', 'node_label', 'node_type',", "response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual(response['data']['attributes'], attributes) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url,", "self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4] ) def test_computers_orderby_name_desc(self):", "rows specified in limit from database starting from the no.", "'limit and offset' RESTApiTestCase.process_test( self, 
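# How the suite below typically drives the helper above (illustrative):
#
#     RESTApiTestCase.process_test(self, 'computers',
#                                  '/computers?limit=2&orderby=+id',
#                                  expected_range=[None, 2])
#
# expected_range is applied with Python slice semantics to the ordered dummy
# data, so [None, 2] means "the first two computers".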
class RESTApiTestSuite(RESTApiTestCase):
    # pylint: disable=too-many-public-methods
    """
    Tests for the endpoints of the AiiDA REST API.
    """

    def test_server(self):
        """
        Test that the /server endpoint returns the AiiDA version
        """
        url = f'{self.get_url_prefix()}/server'
        from aiida import __version__

        with self.app.test_client() as client:
            response = client.get(url)
            data = json.loads(response.data)['data']

        self.assertEqual(__version__, data['AiiDA_version'])
        self.assertEqual(self.get_url_prefix(), data['API_prefix'])

    def test_base_url(self):
        """
        Test that / returns list of endpoints
        """
        with self.app.test_client() as client:
            data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
            data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']

        self.assertTrue(len(data_base['available_endpoints']) > 0)
        self.assertDictEqual(data_base, data_server)

    def test_cors_headers(self):
        """
        Test that REST API sets cross-origin resource sharing headers
        """
        url = f'{self.get_url_prefix()}/computers'
        with self.app.test_client() as client:
            response = client.get(url)
            headers = response.headers
            self.assertEqual(headers.get(ACL_ORIGIN), '*')

    ############### computers endpoint ########################
    def test_computers_details(self):
        """
        Requests the details of a single computer
        """
        node_uuid = self.get_dummy_data()['computers'][1]['uuid']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
        )

    def test_computers_list(self):
        """
        Get the full list of computers from database
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_list_limit_offset(self):
        """
        Get the list of computers from database using limit and offset parameter.
        It should return the no. of rows specified in limit from database
        starting from the no. specified in offset
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
        )

    def test_computers_list_limit_only(self):
        """
        Get the list of computers from database using limit parameter.
        It should return the no. of rows specified in limit from database.
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])

    def test_computers_list_offset_only(self):
        """
        Get the list of computers from database using offset parameter.
        It should return all the rows from database starting from the no.
        specified in offset
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])

    def test_computers_list_limit_offset_perpage(self):
        """
        If we pass the limit, offset and perpage at the same time, it
        would return the error message.
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
        )

    def test_computers_list_page_limit_offset(self):
        """
        If we use the page, limit and offset at the same time, it
        would return the error message.
        """
        expected_error = 'perpage key is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
        )

    def test_complist_pagelimitoffset_perpage(self):
        """
        If we use the page, limit, offset and perpage at the same time, it
        would return the error message.
        """
        expected_error = 'perpage key is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self,
            'computers',
            '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
            expected_errormsg=expected_error
        )

    def test_computers_list_page_default(self):
        """
        It returns the no. of rows defined as the default perpage option
        from database.

        no. of pages = total no. of computers in database / perpage
        "/page" acts as "/page/1?perpage=default_value"
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)

    def test_computers_list_page_perpage(self):
        """
        no. of pages = total no. of computers in database / perpage
        Using this formula it returns the no. of rows for the requested page
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
        )

    def test_computers_list_page_perpage_exceed(self):
        """
        no. of pages = total no. of computers in database / perpage
        If we request a page that exceeds the total no. of pages, it
        would return the error message.
        """
        expected_error = 'Non existent page requested. The page range is [1 : ' \
                         '3]'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
        )
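    # Worked example of the pagination arithmetic asserted above (added for
    # clarity): setUpClass stores 4 dummy computers in addition to the
    # localhost computer created by the test infrastructure, i.e. 5 in total,
    # so with perpage=2 the page range is ceil(5 / 2) = 3, and requesting
    # page 4 must fail with 'Non existent page requested. The page range is
    # [1 : 3]'.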
incoming, outgoing \"\"\" if", "ordered by \"id\" in ascending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id',", "list of given calculation attributes filtered \"\"\" cell = [[2.,", "= client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])", "\"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1' with self.app.test_client() as", "would return the error message. \"\"\" expected_error = 'Non existent", "test_nodes_full_type_filter(self): \"\"\" Get the list of nodes filtered by full_type", "####################### def test_computers_filter_mixed1(self): \"\"\" Add filter for the hostname and", "# Add the calcjob_inputs folder with the aiida.in file to", "it's parameters :param url: Web url :return: url path and", "resources) calc.set_attribute('attr1', 'OK') calc.set_attribute('attr2', 'OK') calc.set_extra('extra1', False) calc.set_extra('extra2', 'extra_info') calc.add_incoming(structure,", "calc = orm.CalcJobNode(computer=cls.computer) calc.set_option('resources', resources) calc.set_attribute('attr1', 'OK') calc.set_attribute('attr2', 'OK') calc.set_extra('extra1',", "json.loads(response_value.data) self.assertEqual(len(response['data']['nodes']), 1) self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1) self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1) self.assertEqual(len(response['data']['metadata']), 1) expected_attr", "comment.') cif = orm.CifData(ase=structure.get_ase()) cif.store() parameter1 = orm.Dict(dict={'a': 1, 'b':", "}).dict() # Cast UUID into a string (e.g. in sqlalchemy", "'attr2': 'OK', 'resources': { 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, }", "as a dictionary when pagination is set \"\"\" expected_extras =", "further information on the license, see the LICENSE.txt file #", "create log message for calcjob import logging from aiida.common.log import", "etc. 
:param node_type: url requested fot the type of the", "url path and url parameters \"\"\" parts = url.split('?') path", "= orm.Dict(dict={'a': 1, 'b': 2}) parameter1.store() parameter2 = orm.Dict(dict={'c': 3,", "computers = [_['comp'] for _ in computers] for comp in", "= {'extra1': False, 'extra2': 'extra_info'} node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url =", "parameter It should return all the rows from database starting", "give calculation incoming \"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'", "\"\"\" expected_extra = ['extra2'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2' with self.app.test_client() as", "expected_list_ids=[3, 1, 2, 4] ) def test_computers_orderby_mixed2(self): \"\"\" Returns the", "for the AiiDA RESTful-api \"\"\" _url_prefix = '/api/v4' _dummy_data =", "\"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report' with self.app.test_client() as", "empty_list=False, expected_list_ids=None, expected_range=None, expected_errormsg=None, uuid=None, result_node_type=None, result_name=None ): # pylint:", "when only one node attribute is specified in attributes_filter only", "orm from aiida.backends.testbase import AiidaTestCase from aiida.common import json from", "'test1.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'pbspro', }, { 'label': 'test2', 'hostname':", "Log log_record = { 'time': now(), 'loggername': 'loggername', 'levelname': logging.getLevelName(LOG_LEVEL_REPORT),", "def test_complist_pagelimitoffset_perpage(self): \"\"\" If we use the page, limit, offset", "list ordered by \"name\" in descending order \"\"\" node_pk =", "return self._dummy_data def get_url_prefix(self): return self._url_prefix @classmethod def process_dummy_data(cls): #", "handle: handle.write(aiida_out) handle.flush() handle.seek(0) retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True) retrieved_outputs.store() retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE,", "= self.split_path(url) self.assertEqual(response['method'], 'GET') self.assertEqual(response['resource_type'], node_type) self.assertEqual(response['path'], path) self.assertEqual(response['id'], uuid)", "aiida.common.exceptions import InputValidationError RESTApiTestCase.node_exception(self, \"/computers?aa=bb&id=2\", InputValidationError) \"\"\" ############### calculation retrieved_inputs", "return the error message. 
\"\"\" expected_error = 'Non existent page", "extras_filter only this extra is returned as a dictionary when", "for the type of the node :param url: web url", "RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True) def test_computers_orderby_id_asc_sign(self): \"\"\" Returns the computers", "f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\\\"attr1\\\"\" with self.app.test_client() as client: rv_obj = client.get(url) response =", "url parameters: id, limit and offset from aiida.common.exceptions import InputValidationError", "json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data'] self.assertTrue(len(data_base['available_endpoints']) > 0) self.assertDictEqual(data_base, data_server) def test_cors_headers(self):", "orm.KpointsData() kpoint.set_kpoints_mesh([4, 4, 4]) kpoint.store() resources = {'num_machines': 1, 'num_mpiprocs_per_machine':", "limit, offset and perpage at same time, it would return", "'calcjob_inputs', 'type': 'DIRECTORY'}]) def test_calculation_retrieved_outputs(self): \"\"\" Get the list of", "data_server) def test_cors_headers(self): \"\"\" Test that REST API sets cross-origin", "RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0]) def test_computers_orderby_name_asc(self):", "response['data'].keys() for key in ['logs']: self.assertIn(key, expected_keys) expected_log_keys = response['data']['logs'][0].keys()", "= True # create test inputs cell = ((2., 0.,", "['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type'] # check fields for _,", "\"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?id>{str(node_pk)}&hostname=\\\"test1.epfl.ch\\\"\", expected_list_ids=[1] )", "expected_range != []: expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]] else: from aiida.common.exceptions import", ") def test_comp_orderby_scheduler_ascsign(self): \"\"\" Returns the computers list ordered by", "def test_computers_list_limit_only(self): \"\"\" Get the list of computers from database", "file # # For further information please visit http://www.aiida.net #", "result_name='incoming' ) def test_calculation_iotree(self): \"\"\" Get filtered incoming list for", "response = json.loads(response_value.data) self.assertNotEqual(len(response['data']['nodes']), 0) for node in response['data']['nodes']: self.assertIn('extras',", ") ########## pass unknown url parameter ########### def test_computers_unknown_param(self): \"\"\"", "['time', 'loggername', 'levelname', 'dbnode_id', 'message']: self.assertIn(key, expected_log_keys) def test_download_formats(self): \"\"\"", "url: web url :param response: url response :param uuid: url", "expected_range: [start, stop] range of expected ids from data :param", "of give calculation incoming \"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url =", "data :param expected_errormsg: expected error message in response :param uuid:", "############### list all parameter combinations ####################### def test_computers_mixed1(self): \"\"\" url", "json.loads(rv_obj.data) self.assertNotIn('message', response) expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']", "test_node_single_extras_filter(self): \"\"\" Check that when only one node extra is", "expected_error = 'perpage key is incompatible with limit and offset'", "= 
f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell' with self.app.test_client() as client: rv_obj = client.get(url) response", "computers list ordered by \"+scheduler_type\" in ascending order \"\"\" node_pk", "client: response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1']) self.assertEqual(response['data']['nodes'][0]['extras']['extra2'],", "['extra1', 'extra2'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value", "process report \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report' with", "self.assertEqual(response_obj.data, input_file) def test_process_report(self): \"\"\" Test process report \"\"\" node_uuid", "'test3', 'hostname': 'test3.epfl.ch', 'transport_type': 'local', 'scheduler_type': 'slurm', }, { 'label':", "f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf' with self.app.test_client() as client: rv_obj = client.get(url) structure_data =", "self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1] ) def test_computers_orderby_scheduler_type_asc(self):", "= client.get(url) response = json.loads(rv_obj.data) for node in response['data']['nodes']: self.assertIn(node['uuid'],", "the no of rows specified in limit from database starting", "def get_dummy_data(self): return self._dummy_data def get_url_prefix(self): return self._url_prefix @classmethod def", "in offset \"\"\" RESTApiTestCase.process_test( self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4] )", "another comment.') cif = orm.CifData(ase=structure.get_ase()) cif.store() parameter1 = orm.Dict(dict={'a': 1,", "rv_obj = client.get(url) response = json.loads(rv_obj.data) expected_data_keys = ['path', 'namespace',", "test_computers_list_page_perpage(self): \"\"\" no.of pages = total no. of computers in", "filter for the hostname and id of computer and get", "@classmethod def setUpClass(cls, *args, **kwargs): # pylint: disable=too-many-locals, too-many-statements \"\"\"", "expected_attr = [ 'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description',", "pass unknown url parameter ########### def test_computers_unknown_param(self): \"\"\" url parameters:", "'test4.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'slurm', }] for dummy_computer in dummy_computers:", "in computers: if comp['uuid'] is not None: comp['uuid'] = str(comp['uuid'])", "the no. 
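    # The attributes/extras tests below rely on two query conventions of the
    # REST API exercised here: '?attributes=true' embeds each node's
    # attributes in the list response, and '?attributes_filter=key1,key2'
    # trims that dict to the named keys (extras work the same way via
    # 'extras' / 'extras_filter'). For example (illustrative):
    #   GET /api/v4/nodes/<uuid>?attributes=true&attributes_filter=cell
    #   -> response['data']['nodes'][0]['attributes'] == {'cell': [[2., 0., 0.], ...]}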
    ############### calculation attributes #############
    def test_calculation_attributes(self):
        """
        Get list of calculation attributes
        """
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            },
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], attributes)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    def test_contents_attributes_filter(self):
        """
        Get list of calculation attributes with filter attributes_filter
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\"attr1\""
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    ############### calculation node attributes filter #############
    def test_calculation_attributes_filter(self):
        """
        Get the list of given calculation attributes filtered
        """
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            },
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)

    ############### calculation node extras_filter #############
    def test_calculation_extras_filter(self):
        """
        Get the list of given calculation extras filtered
        """
        extras = {'extra1': False, 'extra2': 'extra_info'}
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
            self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])

    ############### structure node attributes filter #############
    def test_structure_attributes_filter(self):
        """
        Get the list of given structure attributes filtered
        """
        cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)

    ############### node attributes_filter with pagination #############
    def test_node_attributes_filter_pagination(self):
        """
        Check that node attributes specified in attributes_filter are
        returned as a dictionary when pagination is set
        """
        expected_attributes = ['resources', 'cell']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('attributes', node)
                self.assertNotIn('attributes.resources', node)
                self.assertNotIn('attributes.cell', node)
                self.assertEqual(len(node['attributes']), len(expected_attributes))
                for attr in expected_attributes:
                    self.assertIn(attr, node['attributes'])

    ############### node get one attributes_filter with pagination #############
    def test_node_single_attributes_filter(self):
        """
        Check that when only one node attribute is specified in
        attributes_filter, only this attribute is returned as a dictionary
        when pagination is set
        """
        expected_attribute = ['resources']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['attributes'].keys()), expected_attribute)

    ############### node extras_filter with pagination #############
    def test_node_extras_filter_pagination(self):
        """
        Check that node extras specified in extras_filter are returned as a
        dictionary when pagination is set
        """
        expected_extras = ['extra1', 'extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('extras', node)
                self.assertNotIn('extras.extra1', node)
                self.assertNotIn('extras.extra2', node)
                self.assertEqual(len(node['extras']), len(expected_extras))
                for extra in expected_extras:
                    self.assertIn(extra, node['extras'])

    ############### node get one extras_filter with pagination #############
    def test_node_single_extras_filter(self):
        """
        Check that when only one node extra is specified in extras_filter,
        only this extra is returned as a dictionary when pagination is set
        """
        expected_extra = ['extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['extras'].keys()), expected_extra)

    ############### node full_type filter #############
    def test_nodes_full_type_filter(self):
        """
        Get the list of nodes filtered by full_type
        """
        expected_node_uuids = []
        for calc in self.get_dummy_data()['calculations']:
            if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.':
                expected_node_uuids.append(calc['uuid'])

        url = f"{self.get_url_prefix()}/nodes/?full_type=\"process.calculation.calcjob.CalcJobNode.|\""
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            for node in response['data']['nodes']:
                self.assertIn(node['uuid'], expected_node_uuids)

    ############### Structure visualization and download #############
    def test_structure_derived_properties(self):
        """
        Get the derived properties of a structure node
        """
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(
                response['data']['derived_properties']['dimensionality'], {
                    'dim': 3,
                    'value': 8.0,
                    'label': 'volume'
                }
            )
            self.assertEqual(response['data']['derived_properties']['formula'], 'Ba')
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    def test_structure_download(self):
        """
        Test download of structure file
        """
        from aiida.orm import load_node
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
        structure_data = load_node(node_uuid)._exportcontent('xsf')[0]  # pylint: disable=protected-access
        self.assertEqual(rv_obj.data, structure_data)

    def test_cif(self):
        """
        Test download of cif file
        """
        from aiida.orm import load_node
        node_uuid = self.get_dummy_data()['cifdata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
        cif = load_node(node_uuid)._prepare_cif()[0]  # pylint: disable=protected-access
        self.assertEqual(rv_obj.data, cif)
    ############### projectable_properties #############
    def test_projectable_properties(self):
        """
        Test projectable_properties endpoint
        """
        for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']:
            url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'
            with self.app.test_client() as client:
                rv_obj = client.get(url)
                response = json.loads(rv_obj.data)
                self.assertNotIn('message', response)

                expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']

                # check fields
                for _, pinfo in response['data']['fields'].items():
                    available_keys = pinfo.keys()
                    for prop in expected_keys:
                        self.assertIn(prop, available_keys)

                # check order
                available_properties = response['data']['fields'].keys()
                for prop in response['data']['ordering']:
                    self.assertIn(prop, available_properties)

    def test_node_namespace(self):
        """
        Test the rest api call to get the list of available node namespaces
        """
        url = f'{self.get_url_prefix()}/nodes/full_types'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type']
            response_keys = response['data'].keys()
            for dkay in expected_data_keys:
                self.assertIn(dkay, response_keys)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)

    def test_comments(self):
        """
        Get the node comments
        """
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)['data']['comments']
            all_comments = []
            for comment in response:
                all_comments.append(comment['message'])
            self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))

    def test_repo(self):
        """
        Test to get repo list or repo file contents for given node
        """
        from aiida.orm import load_node

        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\"calcjob_inputs\""
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])

        url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\"calcjob_inputs/aiida.in\""
        with self.app.test_client() as client:
            response_obj = client.get(url)
            input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb')
            self.assertEqual(response_obj.data, input_file)

    def test_process_report(self):
        """
        Test process report
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            expected_keys = response['data'].keys()
            for key in ['logs']:
                self.assertIn(key, expected_keys)
            expected_log_keys = response['data']['logs'][0].keys()
            for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']:
                self.assertIn(key, expected_log_keys)

    def test_download_formats(self):
        """
        Test the download_formats endpoint
        """
        url = f'{self.get_url_prefix()}/nodes/download_formats'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']:
                self.assertIn(key, response['data'].keys())
            for key in ['cif', 'xsf', 'xyz']:
                self.assertIn(key, response['data']['data.structure.StructureData.|'])
            self.assertIn('cif', response['data']['data.cif.CifData.|'])
computer_projections", "node) self.assertNotIn('attributes.cell', node) self.assertEqual(len(node['attributes']), len(expected_attributes)) for attr in expected_attributes: self.assertIn(attr,", "def test_computers_orderby_id_asc(self): \"\"\" Returns the computers list ordered by \"id\"", "\"computers\", \"/computers?orderby=+scheduler_type, -hostname\", expected_list_ids=[1,0,4,3,2]) \"\"\" ############### list filter combinations #######################", "and it's parameters :param url: Web url :return: url path", "structure node attributes filter ############# def test_structure_attributes_filter(self): \"\"\" Get the", "url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report' with self.app.test_client() as client: response_value = client.get(url)", "def test_nodes_full_type_filter(self): \"\"\" Get the list of nodes filtered by", "along with the node results. e.g. url method, node_type, path,", "test_structure_attributes_filter(self): \"\"\" Get the list of given calculation attributes filtered", "returns AiiDA version \"\"\" url = f'{self.get_url_prefix()}/server' from aiida import", "disable=too-many-public-methods \"\"\" Define unittests for rest api \"\"\" ############### generic", "list (e.g. id > 2) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(", "\"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?hostname=\"test1.epfl.ch\"', expected_list_ids=[1]) def test_computers_filter_transport_type(self): \"\"\" Add filter", "node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3, 4,", "url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources' with self.app.test_client() as client: response_value = client.get(url)", "\"\"\"Unittests for REST API.\"\"\" import tempfile from flask_cors.core import ACL_ORIGIN", "matches expected values. 
:param entity_type: url requested for the type", "def test_computers_list(self): \"\"\" Get the full list of computers from", "page, perpage def process_test( self, entity_type, url, full_list=False, empty_list=False, expected_list_ids=None,", "retrieved_outputs = orm.FolderData() # Add the calcjob_outputs folder with the", "This functions prepare atomic chunks of typical responses from the", "the filtered computer list \"\"\" RESTApiTestCase.process_test( self, 'computers', '/computers?transport_type=\"local\"&name=\"test3\"&orderby=+id', expected_list_ids=[3]", "full_type \"\"\" expected_node_uuids = [] for calc in self.get_dummy_data()['calculations']: if", "+ '/server/endpoints').data)['data'] self.assertTrue(len(data_base['available_endpoints']) > 0) self.assertDictEqual(data_base, data_server) def test_cors_headers(self): \"\"\"", "Add filter for the name of computer and get the", "if comp['uuid'] is not None: comp['uuid'] = str(comp['uuid']) cls._dummy_data['computers'] =", "output file\\nof the CalcJob node' retrieved_outputs = orm.FolderData() # Add", "0.), (0., 2., 0.), (0., 0., 2.)) structure = orm.StructureData(cell=cell)", "as client: rv_obj = client.get(url) response = json.loads(rv_obj.data)['data']['comments'] all_comments =", "'pbspro', }, { 'label': 'test2', 'hostname': 'test2.epfl.ch', 'transport_type': 'ssh', 'scheduler_type':", "expected_range is None: expected_range = [] if result_node_type is None", "client: rv_obj = client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual(", "in data: if datum['uuid'] is not None: datum['uuid'] = str(datum['uuid'])", "visit http://www.aiida.net # ########################################################################### # pylint: disable=too-many-lines \"\"\"Unittests for REST", "template record message', 'metadata': { 'content': 'test' }, } Log(**log_record)", "# pylint: disable=too-many-public-methods \"\"\" Define unittests for rest api \"\"\"", "id, page, perpage \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers',", "'num_mpiprocs_per_machine': 1 }, } node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'", "RESTApiTestCase.process_test( self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4] ) def test_computers_mixed3(self): \"\"\"", "response: all_comments.append(comment['message']) self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))", "filtered incoming list for given calculations \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid']", "\"\"\" for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']: url", "the AiiDA code. # # # # The code is", "order by \"transport_type\" in ascending order and if it is", "from the no. 
specified in offset \"\"\" RESTApiTestCase.process_test( self, 'computers',", "test_node_namespace(self): \"\"\" Test the rest api call to get list", "id > 2) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test( self, 'computers',", "client: rv_obj = client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) expected_keys", "input_file) def test_process_report(self): \"\"\" Test process report \"\"\" node_uuid =", "offset and perpage at same time, it would return the", "having same transport_type, order it by \"id\" \"\"\" node_pk =", "of single computer \"\"\" node_uuid = self.get_dummy_data()['computers'][1]['uuid'] RESTApiTestCase.process_test( self, 'computers',", "\"\"\" Test process report \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url =", "attributes with filter attributes_filter \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url =", "'nodes', f\"/nodes/{str(node_uuid)}/links/incoming?node_type=\\\"data.dict.Dict.\\\"\", expected_list_ids=[3], uuid=node_uuid, result_node_type='data', result_name='incoming' ) def test_calculation_iotree(self): \"\"\"", "orm.CalcFunctionNode(computer=cls.computer) calcfunc.store() calc = orm.CalcJobNode(computer=cls.computer) calc.set_option('resources', resources) calc.set_attribute('attr1', 'OK') calc.set_attribute('attr2',", "as handle: handle.write(aiida_in) handle.flush() handle.seek(0) calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True) calc.store() #", "parameters: id, limit and offset from aiida.common.exceptions import InputValidationError RESTApiTestCase.node_exception(self,", "self.app.test_client() as client: rv_obj = client.get(url) cif = load_node(node_uuid)._prepare_cif()[0] #", "_, pinfo in response['data']['fields'].items(): available_keys = pinfo.keys() for prop in", "'label': 'test3', 'hostname': 'test3.epfl.ch', 'transport_type': 'local', 'scheduler_type': 'slurm', }, {", "node attributes specified in attributes_filter are returned as a dictionary", "client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual( response['data']['derived_properties']['dimensionality'], { 'dim':", "is returned as a dictionary when pagination is set \"\"\"", "= self._dummy_data[result_node_type] elif empty_list: expected_data = [] elif expected_list_ids: expected_data", "ordered by \"name\" in descending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id']", "> 0) self.assertDictEqual(data_base, data_server) def test_cors_headers(self): \"\"\" Test that REST", "expected_list_ids=[1]) def test_computers_filter_transport_type(self): \"\"\" Add filter for the transport_type of", "descending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?pk>{str(node_pk)}&transport_type=\\\"ssh\\\"&orderby=-scheduler_type\",", "orm.FolderData() # Add the calcjob_outputs folder with the aiida.out file", "computer and get the filtered computer list \"\"\" RESTApiTestCase.process_test(self, 'computers',", "attributes specified in attributes_filter are returned as a dictionary when", "= json.loads(rv_obj.data) expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type'] response_keys", "node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\\\"calcjob_inputs\\\"\" with self.app.test_client() as 
client:", "self._dummy_data[result_node_type][expected_range[0]:expected_range[1]] else: from aiida.common.exceptions import InputValidationError raise InputValidationError('Pass the expected", "cif = orm.CifData(ase=structure.get_ase()) cif.store() parameter1 = orm.Dict(dict={'a': 1, 'b': 2})", "the rows from database starting from the no. specified in", "limit and offset from aiida.common.exceptions import InputValidationError RESTApiTestCase.node_exception(self, \"/computers?aa=bb&id=2\", InputValidationError)", "['resources', 'cell'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell' with self.app.test_client() as client: response_value", "0.], [0., 0., 2.]] node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'", "Prepare typical REST responses cls.process_dummy_data() def get_dummy_data(self): return self._dummy_data def", ") def test_computers_orderby_name_asc_sign(self): \"\"\" Returns the computers list ordered by", "calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc', project=calculation_projections).order_by({ 'calc': [{ 'id': { 'order':", "f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid ) def test_computers_list(self): \"\"\" Get the full", "RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True) def test_computers_list_page_perpage(self): \"\"\" no.of pages =", "json.loads(response_value.data) self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}]) url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\\\"calcjob_inputs/aiida.in\\\"\" with", "'test' }, } Log(**log_record) aiida_out = 'The output file\\nof the", "# For further information on the license, see the LICENSE.txt", "generic endpoints ######################## def test_server(self): \"\"\" Test that /server endpoint", "pk :param result_node_type: node type in response data :param result_name:", "for given calculations \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'", "1) expected_attr = [ 'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid',", "= url.split('?') path = '' query_string = '' if parts:", "empty_list: if the response list is empty :param expected_list_ids: list", "None: comp['uuid'] = str(comp['uuid']) cls._dummy_data['computers'] = computers calculation_projections = ['id',", "'OK', 'resources': { 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, } node_uuid", "test_computers_orderby_mixed1(self): \"\"\" Returns the computers list first order by \"transport_type\"", "None]) def test_computers_list_limit_offset_perpage(self): \"\"\" If we pass the limit, offset", "The AiiDA team. All rights reserved. # # This file", "= data def split_path(self, url): # pylint: disable=no-self-use \"\"\" Split", "list of endpoints \"\"\" with self.app.test_client() as client: data_base =", "url with self.app.test_client() as client: rv_response = client.get(url) response =", "change this! 
computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']", "of given calculation retrieved_inputs \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url =", "test_base_url(self): \"\"\" Test that / returns list of endpoints \"\"\"", "If we request the page which exceeds the total no.", "key in ['data.structure.StructureData.|', 'data.cif.CifData.|']: self.assertIn(key, response['data'].keys()) for key in ['cif',", "\"\"\" Returns the computers list ordered by \"scheduler_type\" in descending", "Copyright (c), The AiiDA team. All rights reserved. # #", "def test_structure_download(self): \"\"\" Test download of structure file \"\"\" from", "class RESTApiTestCase(AiidaTestCase): \"\"\" Setup of the tests for the AiiDA", "node) self.assertEqual(len(node['extras']), len(expected_extras)) for extra in expected_extras: self.assertIn(extra, node['extras']) ###############", "self.assertIn(node['uuid'], expected_node_uuids) ############### Structure visualization and download ############# def test_structure_derived_properties(self):", "= json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['attributes'], attributes) ############### calculation node extras_filter ############# def", "computers in database / perpage Using this formula it returns", "rest api \"\"\" ############### generic endpoints ######################## def test_server(self): \"\"\"", "expected_list_ids: expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids] elif expected_range", "\"id\" in descending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3,", "0) self.assertDictEqual(data_base, data_server) def test_cors_headers(self): \"\"\" Test that REST API", "self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4] ) def test_computers_mixed3(self): \"\"\" url", "error message in response :param uuid: url requested for the", "'groups']: url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties' with self.app.test_client() as client: rv_obj =", "aiida.orm import load_node node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\\\"calcjob_inputs\\\"\" with", "= [node['uuid'] for node in expected_data] result_node_uuids = [node['uuid'] for", "order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=+scheduler_type\", expected_list_ids=[1,", "data['API_prefix']) def test_base_url(self): \"\"\" Test that / returns list of", "f'{self.get_url_prefix()}/{nodetype}/projectable_properties' with self.app.test_client() as client: rv_obj = client.get(url) response =", "is having same scheduler_type, order it by \"hostname\" descending order", "f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\\\"calcjob_inputs\\\"\" with self.app.test_client() as client: response_value = client.get(url) response =", "ordered by \"+id\" in ascending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id',", "['nodes', 'processes', 'computers', 'users', 'groups']: url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties' with self.app.test_client()", "import orm from aiida.backends.testbase import AiidaTestCase from aiida.common import json", "= '' if parts: path = parts[0] if 
len(parts) >", "a specific page is incompatible with ' \\ 'limit and", "self.assertEqual(self.get_url_prefix(), data['API_prefix']) def test_base_url(self): \"\"\" Test that / returns list", "attributes filtered \"\"\" cell = [[2., 0., 0.], [0., 2.,", "= ['resources', 'cell'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell' with self.app.test_client() as client:", "'computers', '/computers?orderby=id', full_list=True) def test_computers_orderby_id_asc_sign(self): \"\"\" Returns the computers list", "contents for given node \"\"\" from aiida.orm import load_node node_uuid", "1: query_string = parts[1] return path, query_string def compare_extra_response_data(self, node_type,", "'num_mpiprocs_per_machine': 1 }, } node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'", "the computers list ordered by \"+id\" in ascending order \"\"\"", "list orderby ######################## def test_computers_orderby_id_asc(self): \"\"\" Returns the computers list", "expected_errormsg: expected error message in response :param uuid: url requested", "self.app.test_client() as client: response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['attributes'],", "\"\"\" Get the list of given calculation attributes filtered \"\"\"", "the node comments \"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'", "self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}]) ############### calculation incoming ############# def", "whether response matches expected values. :param entity_type: url requested for", "= self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?pk>{str(node_pk)}&transport_type=\\\"ssh\\\"&orderby=-scheduler_type\", expected_list_ids=[2, 4, 1] )", "url, response, uuid) class RESTApiTestSuite(RESTApiTestCase): # pylint: disable=too-many-public-methods \"\"\" Define", "self.assertIn('extras', node) self.assertNotIn('extras.extra1', node) self.assertNotIn('extras.extra2', node) self.assertEqual(len(node['extras']), len(expected_extras)) for extra", "test_computers_mixed3(self): \"\"\" url parameters: id, transport_type, orderby \"\"\" node_pk =", "\"\"\" Requests the details of single computer \"\"\" node_uuid =", "orm.StructureData, 'data': orm.Data, } for label, dataclass in data_types.items(): data", "calc in self.get_dummy_data()['calculations']: if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.': expected_node_uuids.append(calc['uuid']) url =", "'uuid', 'description', 'incoming', 'outgoing' ] received_attr = response['data']['nodes'][0].keys() for attr", "def test_cors_headers(self): \"\"\" Test that REST API sets cross-origin resource", "object) computers = [_['comp'] for _ in computers] for comp", "test1 test4 RESTApiTestCase.process_test(self, \"computers\", \"/computers?orderby=+scheduler_type, -hostname\", expected_list_ids=[1,0,4,3,2]) \"\"\" ############### list", "self.assertNotIn('message', response) self.assertEqual( response['data']['derived_properties']['dimensionality'], { 'dim': 3, 'value': 8.0, 'label':", "of computer and get the filtered computer list \"\"\" RESTApiTestCase.process_test(self,", "using limit and offset parameter. 
    def compare_extra_response_data(self, node_type, url, response, uuid=None):
        """
        In the url response we pass some extra information/data along with the
        node results, e.g. url method, node_type, path, pk, query_string, url,
        url_root, etc.

        :param node_type: url requested for the type of the node
        :param url: web url
        :param response: url response
        :param uuid: url requested for the node pk
        """
        path, query_string = self.split_path(url)

        self.assertEqual(response['method'], 'GET')
        self.assertEqual(response['resource_type'], node_type)
        self.assertEqual(response['path'], path)
        self.assertEqual(response['id'], uuid)
        self.assertEqual(response['query_string'], query_string)
        self.assertEqual(response['url'], f'http://localhost{url}')
        self.assertEqual(response['url_root'], 'http://localhost/')

    # node details and list with limit, offset, page, perpage
    def process_test(
        self,
        entity_type,
        url,
        full_list=False,
        empty_list=False,
        expected_list_ids=None,
        expected_range=None,
        expected_errormsg=None,
        uuid=None,
        result_node_type=None,
        result_name=None
    ):
        # pylint: disable=too-many-arguments
        """
        Check whether the response matches the expected values.

        :param entity_type: url requested for the type of the node
        :param url: web url
        :param full_list: if url is requested to get the full list
        :param empty_list: if the response list is empty
        :param expected_list_ids: list of expected ids from data
        :param expected_range: [start, stop] range of expected ids from data
        :param expected_errormsg: expected error message in response
        :param uuid: url requested for the node pk
        :param result_node_type: node type in response data
        :param result_name: result name in response, e.g. incoming, outgoing
        """
        if expected_list_ids is None:
            expected_list_ids = []

        if expected_range is None:
            expected_range = []

        if result_node_type is None and result_name is None:
            result_node_type = entity_type
            result_name = entity_type

        url = self._url_prefix + url

        with self.app.test_client() as client:
            rv_response = client.get(url)
            response = json.loads(rv_response.data)

            if expected_errormsg:
                self.assertEqual(response['message'], expected_errormsg)
            else:
                if full_list:
                    expected_data = self._dummy_data[result_node_type]
                elif empty_list:
                    expected_data = []
                elif expected_list_ids:
                    expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
                elif expected_range != []:
                    expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
                else:
                    from aiida.common.exceptions import InputValidationError
                    raise InputValidationError('Pass the expected range of the dummydata')

                expected_node_uuids = [node['uuid'] for node in expected_data]
                result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
                self.assertEqual(expected_node_uuids, result_node_uuids)

                self.compare_extra_response_data(entity_type, url, response, uuid)
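
# ---------------------------------------------------------------------------
# Illustrative sketch (added, not part of the original suite): `process_test`
# drives Flask's test client, but the same URLs can be exercised against a
# live server, e.g. one started with `verdi restapi` (assumed here to listen
# on 127.0.0.1:5000). With the `requests` package, the equivalent of
# `process_test('computers', '/computers?orderby=+id', full_list=True)` is:
#
#     import requests
#
#     payload = requests.get('http://127.0.0.1:5000/api/v4/computers?orderby=+id').json()
#     uuids = [comp['uuid'] for comp in payload['data']['computers']]
# ---------------------------------------------------------------------------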

class RESTApiTestSuite(RESTApiTestCase):
    # pylint: disable=too-many-public-methods
    """
    Define unittests for rest api
    """

    ############### generic endpoints ########################
    def test_server(self):
        """Test that /server endpoint returns AiiDA version"""
        url = f'{self.get_url_prefix()}/server'
        from aiida import __version__

        with self.app.test_client() as client:
            response = client.get(url)
            data = json.loads(response.data)['data']

            self.assertEqual(__version__, data['AiiDA_version'])
            self.assertEqual(self.get_url_prefix(), data['API_prefix'])

    def test_base_url(self):
        """Test that / returns list of endpoints"""
        with self.app.test_client() as client:
            data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
            data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']

            self.assertTrue(len(data_base['available_endpoints']) > 0)
            self.assertDictEqual(data_base, data_server)

    def test_cors_headers(self):
        """Test that REST API sets cross-origin resource sharing headers"""
        url = f'{self.get_url_prefix()}/server'
        with self.app.test_client() as client:
            response = client.get(url)
            headers = response.headers
            self.assertEqual(headers.get(ACL_ORIGIN), '*')

    ############### computers ########################
    def test_computers_details(self):
        """Requests the details of single computer"""
        node_uuid = self.get_dummy_data()['computers'][1]['uuid']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
        )

    def test_computers_list(self):
        """Get the full list of computers from database"""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_list_limit_offset(self):
        """
        Get the list of computers from database using limit and offset parameter.
        It should return the no. of rows specified in limit from database
        starting from the no. specified in offset
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4])

    def test_computers_list_limit_only(self):
        """
        Get the list of computers from database using limit parameter.
        It should return the no. of rows specified in limit from database.
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])

    def test_computers_list_offset_only(self):
        """
        Get the list of computers from database using offset parameter.
        It should return all the rows from database starting from the no.
        specified in offset
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])

    def test_computers_list_limit_offset_perpage(self):
        """
        If we pass the limit, offset and perpage at the same time, it would
        return the error message.
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
        )

    def test_computers_list_page_limit_offset(self):
        """
        If we use the page, limit and offset at the same time, it would
        return the error message.
        """
        expected_error = 'requesting a specific page is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
        )

    def test_complist_pagelimitoffset_perpage(self):
        """
        If we use the page, limit, offset and perpage at the same time, it
        would return the error message.
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
        )

    def test_computers_list_page_default(self):
        """
        It returns the no. of rows defined as default perpage option
        from database.

        no. of pages = total no. of computers in database / perpage
        "/page" acts as "/page/1?perpage=default_value"
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)

    def test_computers_list_page_perpage(self):
        """
        no. of pages = total no. of computers in database / perpage
        Using this formula it returns the no. of rows for the requested page
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
        )

    def test_computers_list_page_perpage_exceed(self):
        """
        no. of pages = total no. of computers in database / perpage
        If we request a page which exceeds the total no. of pages then
        it would return the error message.
        """
        expected_error = 'Non existent page requested. The page range is [1 : ' \
                         '3]'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
        )
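
    # Illustrative sketch (added): the page arithmetic asserted by the
    # pagination tests above. With the five computers of this fixture and
    # perpage=2 the valid page range is [1 : 3]:
    #
    #     import math
    #     num_pages = math.ceil(5 / 2)  # -> 3, so page 4 is "non existent"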
    ############### list filters ########################
    def test_computers_filter_id1(self):
        """
        Add filter on the id of computer and get the filtered computer
        list (e.g. id=1)
        """
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])

    def test_computers_filter_id2(self):
        """
        Add filter on the id of computer and get the filtered computer
        list (e.g. id > 2)
        """
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
        )

    def test_computers_filter_pk(self):
        """
        Add filter on the id of computer and get the filtered computer
        list (e.g. pk=1)
        """
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])

    def test_computers_filter_name(self):
        """Add filter for the name of computer and get the filtered computer list"""
        RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])

    def test_computers_filter_hostname(self):
        """Add filter for the hostname of computer and get the filtered computer list"""
        RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])

    def test_computers_filter_transport_type(self):
        """Add filter for the transport_type of computer and get the filtered computer list"""
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
        )

    ############### list orderby ########################
    def test_computers_orderby_id_asc(self):
        """Returns the computers list ordered by "id" in ascending order"""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)

    def test_computers_orderby_id_asc_sign(self):
        """Returns the computers list ordered by "+id" in ascending order"""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_orderby_id_desc(self):
        """Returns the computers list ordered by "id" in descending order"""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])

    def test_computers_orderby_name_asc(self):
        """Returns the computers list ordered by "name" in ascending order"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
        )

    def test_computers_orderby_name_asc_sign(self):
        """Returns the computers list ordered by "+name" in ascending order"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
        )

    def test_computers_orderby_name_desc(self):
        """Returns the computers list ordered by "name" in descending order"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
        )

    def test_computers_orderby_scheduler_type_asc(self):
        """Returns the computers list ordered by "scheduler_type" in ascending order"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=scheduler_type",
            expected_list_ids=[1, 4, 2]
        )

    def test_comp_orderby_scheduler_ascsign(self):
        """Returns the computers list ordered by "+scheduler_type" in ascending order"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f"/computers?transport_type=\"ssh\"&pk>{str(node_pk)}&orderby=+scheduler_type",
            expected_list_ids=[1, 4, 2]
        )

    def test_computers_orderby_schedulertype_desc(self):
        """Returns the computers list ordered by "scheduler_type" in descending order"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f"/computers?pk>{str(node_pk)}&transport_type=\"ssh\"&orderby=-scheduler_type",
            expected_list_ids=[2, 4, 1]
        )

    ############### list orderby combinations #######################
    def test_computers_orderby_mixed1(self):
        """
        Returns the computers list first order by "transport_type" in
        ascending order and if it is having same transport_type, order it
        by "id"
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=transport_type,id', expected_list_ids=[3, 1, 2, 4]
        )

    def test_computers_orderby_mixed2(self):
        """
        Returns the computers list first order by "scheduler_type" in
        descending order and if it is having same scheduler_type, order it
        by "name"
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3, 4, 1]
        )

    def test_computers_orderby_mixed3(self):
        """
        Returns the computers list first order by "scheduler_type" in
        descending order and if it is having same scheduler_type, order it
        by "hostname" descending order

        Response::
            test4 slurm
            test3 slurm
            test2 torque
            test1 pbspro
            localhost pbspro
        ==========
        Expected::
            test1 pbspro
            localhost pbspro
            test4 slurm
            test3 slurm
            test2 torque
            test1 test4

        RESTApiTestCase.process_test(self, "computers",
                                     "/computers?orderby=+scheduler_type, -hostname",
                                     expected_list_ids=[1,0,4,3,2])
        """

    ############### list filter combinations #######################
    def test_computers_filter_mixed1(self):
        """
        Add filter for the hostname and id of computer and get the filtered
        computer list
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f"/computers?id>{str(node_pk)}&hostname=\"test1.epfl.ch\"", expected_list_ids=[1]
        )

    def test_computers_filter_mixed2(self):
        """
        Add filter for the id, hostname and transport_type of the computer
        and get the filtered computer list
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f"/computers?id>{str(node_pk)}&hostname=\"test3.epfl.ch\"&transport_type=\"ssh\"",
            empty_list=True
        )

    ############### list all parameter combinations #######################
    def test_computers_mixed1(self):
        """url parameters: id, limit and offset"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
        )

    def test_computers_mixed2(self):
        """url parameters: id, page, perpage"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
        )

    def test_computers_mixed3(self):
        """url parameters: id, transport_type, orderby"""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f"/computers?id>={str(node_pk)}&transport_type=\"ssh\"&orderby=-id&limit=2",
            expected_list_ids=[4, 2]
        )

    ########## pass unknown url parameter ###########
    def test_computers_unknown_param(self):
        """
        url parameters: id, limit and offset

        from aiida.common.exceptions import InputValidationError
        RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
        """
\"?\" to get url path and it's parameters :param", "f'{self.get_url_prefix()}/nodes/download_formats' with self.app.test_client() as client: response_value = client.get(url) response =", "========== Expected:: test1 pbspro localhost pbspro test4 slurm test3 slurm", "retrieved_outputs \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files' with self.app.test_client()", "self.assertNotEqual(len(response['data']['nodes']), 0) for node in response['data']['nodes']: self.assertIn('extras', node) self.assertNotIn('extras.extra1', node)", "offset \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None]) def test_computers_list_limit_offset_perpage(self): \"\"\"", "\"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3,", "for different requests/filters/orderings etc. \"\"\" super().setUpClass() api = configure_api(catch_internal_server=True) cls.app", "endpoints ######################## def test_server(self): \"\"\" Test that /server endpoint returns", "pbspro localhost pbspro ========== Expected:: test1 pbspro localhost pbspro test4", "retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True) retrieved_outputs.store() retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved') kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')", "f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4] ) def test_computers_orderby_name_asc_sign(self): \"\"\" Returns", "Get list of calculation attributes with filter attributes_filter \"\"\" node_uuid", "Expected:: test1 pbspro localhost pbspro test4 slurm test3 slurm test2", "3, 4] ) def test_computers_orderby_name_desc(self): \"\"\" Returns the computers list", "\"\"\" Returns the computers list ordered by \"+scheduler_type\" in ascending", "elif empty_list: expected_data = [] elif expected_list_ids: expected_data = [self._dummy_data[result_node_type][i]", "\"\"\" Check that when only one node attribute is specified", "['id', 'uuid', 'user_id', 'node_type'] data_types = { 'cifdata': orm.CifData, 'parameterdata':", "expected_range = [] if result_node_type is None and result_name is", "file \"\"\" from aiida.orm import load_node node_uuid = self.get_dummy_data()['cifdata'][0]['uuid'] url", "the hostname and id of computer and get the filtered", "pages then it would return the error message. \"\"\" expected_error", "filtered computer list (e.g. 
id=1) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self,", "filter ############# def test_calculation_attributes_filter(self): \"\"\" Get the list of given", "'hostname': 'test1.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'pbspro', }, { 'label': 'test2',", "available_properties) def test_node_namespace(self): \"\"\" Test the rest api call to", "in descending order and if it is having same scheduler_type,", ") def test_computers_orderby_schedulertype_desc(self): \"\"\" Returns the computers list ordered by", "self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value =", "url = f'{self.get_url_prefix()}/server' from aiida import __version__ with self.app.test_client() as", "of computers in database / perpage \"/page\" acts as \"/page/1?perpage=default_value\"", "with self.app.test_client() as client: rv_obj = client.get(url) response = json.loads(rv_obj.data)", "client.get(url) response = json.loads(response_value.data) expected_keys = response['data'].keys() for key in", "if calc['uuid'] is not None: calc['uuid'] = str(calc['uuid']) cls._dummy_data['calculations'] =", "= client.get(url) data = json.loads(response.data)['data'] self.assertEqual(__version__, data['AiiDA_version']) self.assertEqual(self.get_url_prefix(), data['API_prefix']) def", "license, see the LICENSE.txt file # # For further information", "result_name = entity_type url = self._url_prefix + url with self.app.test_client()", "to get list of available node namespace \"\"\" url =", "= self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=scheduler_type\", expected_list_ids=[1, 4, 2] )", "offset \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]", "'data.cif.CifData.|']: self.assertIn(key, response['data'].keys()) for key in ['cif', 'xsf', 'xyz']: self.assertIn(key,", "computer list \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?name=\"test1\"', expected_list_ids=[1]) def test_computers_filter_hostname(self): \"\"\"", "aiida.orm import load_node node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf' with", "id=1) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1]) def", "attribute is returned as a dictionary when pagination is set", "'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing' ] received_attr = response['data']['nodes'][0].keys()", "############### calculation node extras_filter ############# def test_calculation_extras_filter(self): \"\"\" Get the", "self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties' with self.app.test_client() as client: rv_obj =", "expected_attribute) ############### node extras_filter with pagination ############# def test_node_extras_filter_pagination(self): \"\"\"", "\"\"\" Get the full list of computers from database \"\"\"", "test_computers_orderby_name_asc_sign(self): \"\"\" Returns the computers list ordered by \"+name\" in", "and offset' 

    def split_path(self, url):  # pylint: disable=no-self-use
        """
        Split the url with "?" to get url path and its parameters

        :param url: Web url
        :return: url path and url parameters
        """
        parts = url.split('?')
        path = ''
        query_string = ''
        if parts:
            path = parts[0]
        if len(parts) > 1:
            query_string = parts[1]

        return path, query_string
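
    # Illustrative only (not part of the original suite): ``split_path``
    # splits on the first ``?``, e.g.
    #
    #     >>> self.split_path('/computers?limit=2&orderby=+id')
    #     ('/computers', 'limit=2&orderby=+id')
    #     >>> self.split_path('/computers')
    #     ('/computers', '')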

    def compare_extra_response_data(self, node_type, url, response, uuid=None):
        """
        In url response, we pass some extra information/data along with the
        node results, e.g. url method, node_type, path, pk, query_string,
        url, url_root etc.

        :param node_type: url requested for the type of the node
        :param url: web url
        :param response: url response
        :param uuid: url requested for the node pk
        """
        path, query_string = self.split_path(url)

        self.assertEqual(response['method'], 'GET')
        self.assertEqual(response['resource_type'], node_type)
        self.assertEqual(response['path'], path)
        self.assertEqual(response['id'], uuid)
        self.assertEqual(response['query_string'], query_string)
        self.assertEqual(response['url'], f'http://localhost{url}')
        self.assertEqual(response['url_root'], 'http://localhost/')
\"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2]) def", "Setup of the tests for the AiiDA RESTful-api \"\"\" _url_prefix", "by \"hostname\" descending order Response:: test4 slurm test3 slurm test2", "= self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true' with self.app.test_client() as client: response_value", "'scheduler_type': 'slurm', }, { 'label': 'test4', 'hostname': 'test4.epfl.ch', 'transport_type': 'ssh',", "['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type'] computers = orm.QueryBuilder().append(orm.Computer, tag='comp',", "Get list of calculation attributes \"\"\" attributes = { 'attr1':", "= str(calc['uuid']) cls._dummy_data['calculations'] = calculations data_projections = ['id', 'uuid', 'user_id',", "response = json.loads(response_value.data) self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}]) url =", "Get the node comments \"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url =", "of computers from database using offset parameter It should return", "'loggername': 'loggername', 'levelname': logging.getLevelName(LOG_LEVEL_REPORT), 'dbnode_id': calc.id, 'message': 'This is a", "self.assertEqual(headers.get(ACL_ORIGIN), '*') ############### computers endpoint ######################## def test_computers_details(self): \"\"\" Requests", "'calcjob_outputs/aiida.out', force=True) retrieved_outputs.store() retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved') kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create') calc1", "url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif' with self.app.test_client() as client: rv_obj = client.get(url)", "with pagination ############# def test_node_attributes_filter_pagination(self): \"\"\" Check that node attributes", "Test that / returns list of endpoints \"\"\" with self.app.test_client()", "test_node_extras_filter_pagination(self): \"\"\" Check that node extras specified in extras_filter are", "node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?id>={str(node_pk)}&transport_type=\\\"ssh\\\"&orderby=-id&limit=2\", expected_list_ids=[4, 2] )", "REST responses cls.process_dummy_data() def get_dummy_data(self): return self._dummy_data def get_url_prefix(self): return", "kpoint.set_kpoints_mesh([4, 4, 4]) kpoint.store() resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}", "please visit http://www.aiida.net # ########################################################################### # pylint: disable=too-many-lines \"\"\"Unittests for", "path = parts[0] if len(parts) > 1: query_string = parts[1]", "response = json.loads(rv_obj.data) for node in response['data']['nodes']: self.assertIn(node['uuid'], expected_node_uuids) ###############", "self.assertDictEqual(data_base, data_server) def test_cors_headers(self): \"\"\" Test that REST API sets", "}] for dummy_computer in dummy_computers: computer = orm.Computer(**dummy_computer) computer.store() #", "as client: rv_obj = client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response)", "link_label='create') calc1 = orm.CalcJobNode(computer=cls.computer) calc1.set_option('resources', resources) calc1.store() dummy_computers = [{", "# create test inputs cell = ((2., 0., 0.), (0.,", "= 
f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\\\"calcjob_inputs\\\"\" with self.app.test_client() as client: response_value = client.get(url) response", "import logging from aiida.common.log import LOG_LEVEL_REPORT from aiida.common.timezone import now", "get_dummy_data(self): return self._dummy_data def get_url_prefix(self): return self._url_prefix @classmethod def process_dummy_data(cls):", "of typical responses from the RESTapi and puts them into", "dummydata') expected_node_uuids = [node['uuid'] for node in expected_data] result_node_uuids =", "symbols=['Ba']) structure.store() structure.add_comment('This is test comment.') structure.add_comment('Add another comment.') cif", "= { 'attr1': 'OK', 'attr2': 'OK', 'resources': { 'num_machines': 1,", "as client: response = client.get(url) data = json.loads(response.data)['data'] self.assertEqual(__version__, data['AiiDA_version'])", "when pagination is set \"\"\" expected_extras = ['extra1', 'extra2'] url", "of cif file \"\"\" from aiida.orm import load_node node_uuid =", "node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1]) def test_computers_filter_id2(self): \"\"\"", "url with \"?\" to get url path and it's parameters", "############### calculation node attributes filter ############# def test_calculation_attributes_filter(self): \"\"\" Get", "def test_computers_orderby_name_asc_sign(self): \"\"\" Returns the computers list ordered by \"+name\"", "range of the dummydata') expected_node_uuids = [node['uuid'] for node in", "response['data']['logs'][0].keys() for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']: self.assertIn(key,", "from aiida.common import json from aiida.common.links import LinkType from aiida.restapi.run_api", "\"scheduler_type\" in ascending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self,", "with the aiida.out file to the FolderData node with tempfile.NamedTemporaryFile(mode='w+')", "def test_computers_filter_id1(self): \"\"\" Add filter on the id of computer", "url is requested to get full list :param empty_list: if", "requested for the node pk :param result_node_type: node type in", "load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb') self.assertEqual(response_obj.data, input_file) def test_process_report(self): \"\"\" Test process report", "url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value = client.get(url)", "\"\"\" Add filter for the hostname and id of computer", "node type in response data :param result_name: result name in", "RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None]) def test_computers_list_limit_offset_perpage(self): \"\"\" If we", "test comment.') structure.add_comment('Add another comment.') cif = orm.CifData(ase=structure.get_ase()) cif.store() parameter1", "node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4] )", "\"\"\" Returns the computers list ordered by \"id\" in ascending", "list index is very fragile and a pain to debug.", "\"\"\" Add filter for the transport_type of computer and get", "'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid ) def 
test_computers_list(self): \"\"\" Get the", "############### list orderby ######################## def test_computers_orderby_id_asc(self): \"\"\" Returns the computers", "self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f\"/nodes/{str(node_uuid)}/links/incoming?node_type=\\\"data.dict.Dict.\\\"\", expected_list_ids=[3], uuid=node_uuid, result_node_type='data', result_name='incoming' ) def", "= 'Non existent page requested. The page range is [1", "as a UUID object) computers = [_['comp'] for _ in", "in response['data']['nodes']: self.assertIn(node['uuid'], expected_node_uuids) ############### Structure visualization and download #############", "pylint: disable=protected-access self.assertEqual(rv_obj.data, cif) ############### projectable_properties ############# def test_projectable_properties(self): \"\"\"", "+ '/').data)['data'] data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data'] self.assertTrue(len(data_base['available_endpoints']) > 0)", "node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true' with self.app.test_client() as client:", "'data': orm.Data, } for label, dataclass in data_types.items(): data =", "is having same transport_type, order it by \"id\" \"\"\" node_pk", "= f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value = client.get(url) response", "Returns the computers list ordered by \"+scheduler_type\" in ascending order", "list ordered by \"scheduler_type\" in ascending order \"\"\" node_pk =", "'name', 'hostname', 'transport_type', 'scheduler_type'] computers = orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({ 'comp':", "list orderby combinations ####################### def test_computers_orderby_mixed1(self): \"\"\" Returns the computers", "is set \"\"\" expected_attribute = ['resources'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources' with", "handle: handle.write(aiida_in) handle.flush() handle.seek(0) calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True) calc.store() # create", "test_comp_orderby_scheduler_ascsign(self): \"\"\" Returns the computers list ordered by \"+scheduler_type\" in", "expected_data = [] elif expected_list_ids: expected_data = [self._dummy_data[result_node_type][i] for i", "f'{self.get_url_prefix()}/server' from aiida import __version__ with self.app.test_client() as client: response", "/server endpoint returns AiiDA version \"\"\" url = f'{self.get_url_prefix()}/server' from", "calculation incoming \"\"\" node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties' with", "# pylint: disable=protected-access self.assertEqual(rv_obj.data, structure_data) def test_cif(self): \"\"\" Test download", "computers list first order by \"scheduler_type\" in descending order and", "calc.set_attribute('attr1', 'OK') calc.set_attribute('attr2', 'OK') calc.set_extra('extra1', False) calc.set_extra('extra2', 'extra_info') calc.add_incoming(structure, link_type=LinkType.INPUT_CALC,", "response, uuid=node_uuid) ############### calculation node attributes filter ############# def test_calculation_attributes_filter(self):", "self.get_dummy_data()['calculations'][1]['uuid'] url = 

    def test_computers_list_page_limit_offset(self):
        """
        If we use the page, limit and offset at the same time, it
        would return the error message.
        """
        expected_error = 'requesting a specific page is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
        )

    def test_complist_pagelimitoffset_perpage(self):
        """
        If we use the page, limit, offset and perpage at the same time, it
        would return the error message.
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
            expected_errormsg=expected_error
        )

    def test_computers_list_page_default(self):
        """
        It returns the no. of rows defined by the default perpage value from database.

        no. of pages = total no. of computers in database / perpage
        "/page" acts as "/page/1?perpage=default_value"
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)

    def test_computers_list_page_perpage(self):
        """
        no. of pages = total no. of computers in database / perpage
        Using this formula it returns the no. of rows for requested page
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
        )

    def test_computers_list_page_perpage_exceed(self):
        """
        no. of pages = total no. of computers in database / perpage
        If we request the page which exceeds the total no. of pages then
        it would return the error message.
        """
        expected_error = 'Non existent page requested. The page range is [1 : ' \
                         '3]'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
        )
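
    # Worked example for the page arithmetic above (illustrative): with the
    # five dummy computers and ``perpage=2`` the number of pages is
    # ceil(5 / 2) = 3, which is why ``/computers/page/4?perpage=2`` is out of
    # range and the endpoint reports the valid page range as [1 : 3].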
id > 2) \"\"\" node_pk", "file\\nof the CalcJob node' retrieved_outputs = orm.FolderData() # Add the", "RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True) def test_computers_list_limit_offset(self): \"\"\" Get the list", "test4 RESTApiTestCase.process_test(self, \"computers\", \"/computers?orderby=+scheduler_type, -hostname\", expected_list_ids=[1,0,4,3,2]) \"\"\" ############### list filter", "retrieved_outputs.store() retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved') kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create') calc1 = orm.CalcJobNode(computer=cls.computer)", "tag='calc', project=calculation_projections).order_by({ 'calc': [{ 'id': { 'order': 'desc' } }]", "node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3,", "############# def test_node_extras_filter_pagination(self): \"\"\" Check that node extras specified in", "for node in response['data']['nodes']: self.assertEqual(list(node['extras'].keys()), expected_extra) ############### node full_type filter", "2, 3, 4] ) def test_computers_orderby_name_desc(self): \"\"\" Returns the computers", "result_name=None ): # pylint: disable=too-many-arguments \"\"\" Check whether response matches", "node_uuid = self.get_dummy_data()['cifdata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif' with self.app.test_client() as client:", "= json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual( response['data']['derived_properties']['dimensionality'], { 'dim': 3, 'value':", "expected_list_ids=[1]) def test_computers_filter_name(self): \"\"\" Add filter for the name of", "rv_obj = client.get(url) cif = load_node(node_uuid)._prepare_cif()[0] # pylint: disable=protected-access self.assertEqual(rv_obj.data,", "nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']: url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'", "computer.store() # Prepare typical REST responses cls.process_dummy_data() def get_dummy_data(self): return", "'label': 'volume' } ) self.assertEqual(response['data']['derived_properties']['formula'], 'Ba') RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response,", "= client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['attributes'], attributes) ############### calculation node", "'node_type'] data_types = { 'cifdata': orm.CifData, 'parameterdata': orm.Dict, 'structuredata': orm.StructureData,", "in calculations: if calc['uuid'] is not None: calc['uuid'] = str(calc['uuid'])", "\"/computers?aa=bb&id=2\", InputValidationError) \"\"\" ############### calculation retrieved_inputs and retrieved_outputs ############# def", "and retrieved_outputs ############# def test_calculation_retrieved_inputs(self): \"\"\" Get the list of", "self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=scheduler_type\", expected_list_ids=[1, 4, 2] ) def", "\"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0]) def", "client.get(url) input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb') self.assertEqual(response_obj.data, input_file) def test_process_report(self): \"\"\"", "computers 
from database \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True) def test_computers_list_limit_offset(self):", "def test_computers_list_limit_offset(self): \"\"\" Get the list of computers from database", "localhost pbspro test4 slurm test3 slurm test2 torque test1 test4", "by full_type \"\"\" expected_node_uuids = [] for calc in self.get_dummy_data()['calculations']:", "node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client:", "Test process report \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'", "and result_name is None: result_node_type = entity_type result_name = entity_type", "of computers in database / perpage If we request the", "with the node results. e.g. url method, node_type, path, pk,", "offset' RESTApiTestCase.process_test( self, 'computers', '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error ) def test_computers_list_page_default(self): \"\"\"", "'DIRECTORY'}]) def test_calculation_retrieved_outputs(self): \"\"\" Get the list of given calculation", "\"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f'/nodes/{str(node_uuid)}/links/incoming?orderby=id', expected_list_ids=[5, 3], uuid=node_uuid,", "orm.Dict(dict={'a': 1, 'b': 2}) parameter1.store() parameter2 = orm.Dict(dict={'c': 3, 'd':", "RESTApiTestCase.process_test(self, \"computers\", \"/computers?orderby=+scheduler_type, -hostname\", expected_list_ids=[1,0,4,3,2]) \"\"\" ############### list filter combinations", "############### list orderby combinations ####################### def test_computers_orderby_mixed1(self): \"\"\" Returns the", "= client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual( response['data']['derived_properties']['dimensionality'], {", "f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1' with self.app.test_client() as client: response_value = client.get(url) response =", "outgoing \"\"\" if expected_list_ids is None: expected_list_ids = [] if", "the filtered computer list \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self,", "} }] }).dict() calculations = [_['calc'] for _ in calculations]", "return the no of rows specified in limit from database", "RESTApiTestCase.process_test( self, 'computers', f\"/computers?id>={str(node_pk)}&transport_type=\\\"ssh\\\"&orderby=-id&limit=2\", expected_list_ids=[4, 2] ) ########## pass unknown", "url, response, uuid=node_uuid) ############### calculation node attributes filter ############# def", "0) for node in response['data']['nodes']: self.assertIn('attributes', node) self.assertNotIn('attributes.resources', node) self.assertNotIn('attributes.cell',", "query_string = parts[1] return path, query_string def compare_extra_response_data(self, node_type, url,", "node details and list with limit, offset, page, perpage def", "repository with tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_in) handle.flush() handle.seek(0) calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in',", "headers \"\"\" url = f'{self.get_url_prefix()}/server' with self.app.test_client() as client: response", "calculation retrieved_inputs and retrieved_outputs ############# def 
test_calculation_retrieved_inputs(self): \"\"\" Get the", "one extras_filter with pagination ############# def test_node_single_extras_filter(self): \"\"\" Check that", "json.loads(response.data)['data'] self.assertEqual(__version__, data['AiiDA_version']) self.assertEqual(self.get_url_prefix(), data['API_prefix']) def test_base_url(self): \"\"\" Test that", "expected_log_keys) def test_download_formats(self): \"\"\" test for download format endpoint \"\"\"", "from database using limit and offset parameter. It should return", "expected_log_keys = response['data']['logs'][0].keys() for key in ['time', 'loggername', 'levelname', 'dbnode_id',", "path and url parameters \"\"\" parts = url.split('?') path =", "database starting from the no. specified in offset \"\"\" RESTApiTestCase.process_test(", "file is part of the AiiDA code. # # #", "in response['data'][result_name]] self.assertEqual(expected_node_uuids, result_node_uuids) self.compare_extra_response_data(entity_type, url, response, uuid) class RESTApiTestSuite(RESTApiTestCase):", "'data': [{ 'id': { 'order': 'desc' } }] }).dict() data", "get one extras_filter with pagination ############# def test_node_single_extras_filter(self): \"\"\" Check", "= self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=+scheduler_type\", expected_list_ids=[1, 4, 2] )", "it would return the error message. \"\"\" expected_error = 'Non", "same scheduler_type, order it by \"name\" \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id']", "calculation incoming \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f'/nodes/{str(node_uuid)}/links/incoming?orderby=id', expected_list_ids=[5,", "'dbnode_id', 'message']: self.assertIn(key, expected_log_keys) def test_download_formats(self): \"\"\" test for download", "\"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2,", "url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties' with self.app.test_client() as client: rv_obj = client.get(url)", "url, response, uuid=node_uuid) ############### calculation attributes ############# def test_calculation_attributes(self): \"\"\"", "parameters: id, transport_type, orderby \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self,", "None: datum['uuid'] = str(datum['uuid']) cls._dummy_data[label] = data def split_path(self, url):", "the FolderData node with tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_out) handle.flush() handle.seek(0)", "unittests for rest api \"\"\" ############### generic endpoints ######################## def", "test4 slurm test3 slurm test2 torque test1 test4 RESTApiTestCase.process_test(self, \"computers\",", "from aiida.common.timezone import now from aiida.orm import Log log_record =", "= self.get_dummy_data()['calculations'][1]['uuid'] url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\\\"calcjob_inputs\\\"\" with self.app.test_client() as client: response_value", ":param node_type: url requested fot the type of the node", "the no of rows specified in limit from database. 
\"\"\"", "url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\\\"attr1\\\"\" with self.app.test_client() as client: rv_obj = client.get(url)", "test_calculation_extras_filter(self): \"\"\" Get the list of given calculation extras filtered", "expected_keys: self.assertIn(prop, available_keys) # check order available_properties = response['data']['fields'].keys() for", "it returns the no. of rows for requested page \"\"\"", "client.get(url) structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access self.assertEqual(rv_obj.data, structure_data) def", "data def split_path(self, url): # pylint: disable=no-self-use \"\"\" Split the", "computers list ordered by \"id\" in descending order \"\"\" RESTApiTestCase.process_test(self,", "in data_types.items(): data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({ 'data': [{ 'id':", "[_['comp'] for _ in computers] for comp in computers: if", "self.get_dummy_data()['calculations'][1]['uuid'] url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename=\\\"calcjob_inputs\\\"\" with self.app.test_client() as client: response_value =", "self._dummy_data[result_node_type] elif empty_list: expected_data = [] elif expected_list_ids: expected_data =", "client: response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['repo_list'], [{'type': 'FILE',", "flask_cors.core import ACL_ORIGIN from aiida import orm from aiida.backends.testbase import", "def process_dummy_data(cls): # pylint: disable=fixme \"\"\" This functions prepare atomic", "list of available node namespace \"\"\" url = f'{self.get_url_prefix()}/nodes/full_types' with", "orm.Dict(dict={'c': 3, 'd': 4}) parameter2.store() kpoint = orm.KpointsData() kpoint.set_kpoints_mesh([4, 4,", "client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}]) ###############", "= orm.CalcJobNode(computer=cls.computer) calc1.set_option('resources', resources) calc1.store() dummy_computers = [{ 'label': 'test1',", "############# def test_calculation_extras_filter(self): \"\"\" Get the list of given calculation", "given calculation extras filtered \"\"\" extras = {'extra1': False, 'extra2':", "that node attributes specified in attributes_filter are returned as a", ") def test_computers_list_page_limit_offset(self): \"\"\" If we use the page, limit", "for rest api \"\"\" ############### generic endpoints ######################## def test_server(self):", "url parameters: id, page, perpage \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test(", "by \"id\" in descending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4,", "test4 slurm test3 slurm test2 torque test1 pbspro localhost pbspro", "disable=too-many-arguments \"\"\" Check whether response matches expected values. 

    ############### calculation retrieved_inputs and retrieved_outputs #############
    def test_calculation_retrieved_inputs(self):
        """
        Get the list of given calculation retrieved_inputs
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])

    def test_calculation_retrieved_outputs(self):
        """
        Get the list of given calculation retrieved_outputs
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])
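
    # The two directory listings above come straight from the repository
    # contents created in ``setUpClass``: ``calcjob_inputs/aiida.in`` on the
    # CalcJobNode itself and ``calcjob_outputs/aiida.out`` on the retrieved
    # FolderData attached via the ``retrieved`` link.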

    ############### calculation incoming #############
    def test_calculation_inputs(self):
        """
        Get the list of given calculation incoming
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        self.process_test(
            'nodes',
            f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
            expected_list_ids=[5, 3],
            uuid=node_uuid,
            result_node_type='data',
            result_name='incoming'
        )

    def test_calculation_input_filters(self):
        """
        Get filtered incoming list for given calculations
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        self.process_test(
            'nodes',
            f"/nodes/{str(node_uuid)}/links/incoming?node_type=\"data.dict.Dict.\"",
            expected_list_ids=[3],
            uuid=node_uuid,
            result_node_type='data',
            result_name='incoming'
        )

    def test_calculation_iotree(self):
        """
        Get filtered incoming list for given calculations
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(len(response['data']['nodes']), 1)
            self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
            self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
            self.assertEqual(len(response['data']['metadata']), 1)
            expected_attr = [
                'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
            ]
            received_attr = response['data']['nodes'][0].keys()
            for attr in expected_attr:
                self.assertIn(attr, received_attr)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
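
    # ``in_limit``/``out_limit`` in the tree endpoint cap how many incoming
    # and outgoing links are returned per node; the calculation built in
    # ``setUpClass`` has two inputs (structure, parameter1) and two outputs
    # (kpoints, retrieved folder), so a limit of 1 each is what makes the
    # single-element ``incoming``/``outgoing`` assertions above hold.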

    ############### calculation attributes #############
    def test_calculation_attributes(self):
        """
        Get list of calculation attributes
        """
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            },
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], attributes)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    def test_contents_attributes_filter(self):
        """
        Get list of calculation attributes with filter attributes_filter
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\"attr1\""
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
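
    # Shape of the payload asserted above (illustrative):
    #
    #     {"data": {"attributes": {"attr1": "OK",
    #                              "attr2": "OK",
    #                              "resources": {...}}},
    #      "method": "GET", "resource_type": "nodes", ...}
    #
    # ``attributes_filter`` simply trims that inner dictionary down to the
    # requested keys.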
id=1) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}',", "RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4] ) def", "4] ) def test_computers_orderby_name_desc(self): \"\"\" Returns the computers list ordered", "# ########################################################################### # pylint: disable=too-many-lines \"\"\"Unittests for REST API.\"\"\" import", "f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4] ) def test_computers_orderby_name_desc(self): \"\"\" Returns", "1 }, } node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes' with", "aiida import __version__ with self.app.test_client() as client: response = client.get(url)", "in data] for datum in data: if datum['uuid'] is not", "'resources': { 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, } node_uuid =", "as client: response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])", "'id': { 'order': 'asc' } }] }).dict() # Cast UUID", "f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=+scheduler_type\", expected_list_ids=[1, 4, 2] ) def test_computers_orderby_schedulertype_desc(self): \"\"\" Returns the", "uuid=node_uuid, result_node_type='data', result_name='incoming' ) def test_calculation_iotree(self): \"\"\" Get filtered incoming", "self.assertEqual(list(node['extras'].keys()), expected_extra) ############### node full_type filter ############# def test_nodes_full_type_filter(self): \"\"\"", "that node extras specified in extras_filter are returned as a", "'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None]) def test_computers_list_limit_offset_perpage(self): \"\"\" If we pass", "'/computers?orderby=+id', full_list=True) def test_computers_orderby_id_desc(self): \"\"\" Returns the computers list ordered", "first order by \"scheduler_type\" in descending order and if it", "!= []: expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]] else: from aiida.common.exceptions import InputValidationError", "'type'] # check fields for _, pinfo in response['data']['fields'].items(): available_keys", "of pages then it would return the error message. \"\"\"", "= ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type'] # check fields for", "specified in extras_filter only this extra is returned as a", "kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create') calc1 = orm.CalcJobNode(computer=cls.computer) calc1.set_option('resources', resources) calc1.store() dummy_computers", "'node_type'] calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc', project=calculation_projections).order_by({ 'calc': [{ 'id': {", "'description', 'incoming', 'outgoing' ] received_attr = response['data']['nodes'][0].keys() for attr in", "############### calculation attributes ############# def test_calculation_attributes(self): \"\"\" Get list of", "the page which exceeds the total no. 

    ############### node attributes_filter with pagination #############
    def test_node_attributes_filter_pagination(self):
        """
        Check that node attributes specified in attributes_filter are
        returned as a dictionary when pagination is set
        """
        expected_attributes = ['resources', 'cell']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('attributes', node)
                self.assertNotIn('attributes.resources', node)
                self.assertNotIn('attributes.cell', node)
                self.assertEqual(len(node['attributes']), len(expected_attributes))
                for attr in expected_attributes:
                    self.assertIn(attr, node['attributes'])

    ############### node get one attributes_filter with pagination #############
    def test_node_single_attributes_filter(self):
        """
        Check that when only one node attribute is specified in
        attributes_filter only this attribute is returned as a dictionary
        when pagination is set
        """
        expected_attribute = ['resources']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['attributes'].keys()), expected_attribute)

    ############### node extras_filter with pagination #############
    def test_node_extras_filter_pagination(self):
        """
        Check that node extras specified in extras_filter are returned as a
        dictionary when pagination is set
        """
        expected_extras = ['extra1', 'extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('extras', node)
                self.assertNotIn('extras.extra1', node)
                self.assertNotIn('extras.extra2', node)
                self.assertEqual(len(node['extras']), len(expected_extras))
                for extra in expected_extras:
                    self.assertIn(extra, node['extras'])

    ############### node get one extras_filter with pagination #############
    def test_node_single_extras_filter(self):
        """
        Check that when only one node extra is specified in extras_filter
        only this extra is returned as a dictionary when pagination is set
        """
        expected_extra = ['extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['extras'].keys()), expected_extra)
f\"/computers?pk>{str(node_pk)}&transport_type=\\\"ssh\\\"&orderby=-scheduler_type\", expected_list_ids=[2, 4, 1]", "def test_node_single_attributes_filter(self): \"\"\" Check that when only one node attribute", "list or repo file contents for given node \"\"\" from", "'ssh', 'scheduler_type': 'slurm', }] for dummy_computer in dummy_computers: computer =", "raise InputValidationError('Pass the expected range of the dummydata') expected_node_uuids =", "computer = orm.Computer(**dummy_computer) computer.store() # Prepare typical REST responses cls.process_dummy_data()", "and get the filtered computer list (e.g. id=1) \"\"\" node_pk", "_dummy_data = {} _PERPAGE_DEFAULT = 20 _LIMIT_DEFAULT = 400 @classmethod", "and get the filtered computer list \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?name=\"test1\"',", "the list of given calculation extras filtered \"\"\" extras =", "at same time, it would return the error message. \"\"\"", "full list of computers from database \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id',", "= client.get(url) input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb') self.assertEqual(response_obj.data, input_file) def test_process_report(self):", "= orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({ 'data': [{ 'id': { 'order': 'desc'", "client.get(url) response = json.loads(response_value.data) for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']: self.assertIn(key,", "json.loads(rv_obj.data)['data']['comments'] all_comments = [] for comment in response: all_comments.append(comment['message']) self.assertEqual(sorted(all_comments),", "descending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name',", "'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3, 4, 1] ) def test_computers_orderby_mixed3(self): \"\"\"", "aiida import orm from aiida.backends.testbase import AiidaTestCase from aiida.common import", "list \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?name=\"test1\"', expected_list_ids=[1]) def test_computers_filter_hostname(self): \"\"\" Add", "if result_node_type is None and result_name is None: result_node_type =", "path, query_string def compare_extra_response_data(self, node_type, url, response, uuid=None): \"\"\" In", "test_computers_list_limit_offset(self): \"\"\" Get the list of computers from database using", "\"id\" \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=transport_type,id', expected_list_ids=[3,", "list \"\"\" RESTApiTestCase.process_test( self, 'computers', '/computers?transport_type=\"local\"&name=\"test3\"&orderby=+id', expected_list_ids=[3] ) ############### list", "\"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4] )", "'users', 'groups']: url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties' with self.app.test_client() as client: rv_obj", "calculation attributes with filter attributes_filter \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url", "2, 3, 4] ) def test_computers_orderby_name_asc_sign(self): \"\"\" Returns the computers", "4, 1] ) def 
test_computers_orderby_mixed3(self): \"\"\" Returns the computers list", "'extra2': 'extra_info'} node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client()", "expected_list_ids=[1,0,4,3,2]) \"\"\" ############### list filter combinations ####################### def test_computers_filter_mixed1(self): \"\"\"", "calculation retrieved_outputs \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files' with", "import InputValidationError raise InputValidationError('Pass the expected range of the dummydata')", "response = json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}]) ############### calculation", "}, { 'label': 'test4', 'hostname': 'test4.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'slurm',", "= total no. of computers in database / perpage \"/page\"", "are returned as a dictionary when pagination is set \"\"\"", "\\ '3]' RESTApiTestCase.process_test( self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error ) ############### list", "response = json.loads(response_value.data) self.assertNotEqual(len(response['data']['nodes']), 0) for node in response['data']['nodes']: self.assertEqual(list(node['extras'].keys()),", "_ in calculations] for calc in calculations: if calc['uuid'] is", "# Copyright (c), The AiiDA team. All rights reserved. #", "given calculation retrieved_inputs \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'", "\"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1]) def test_computers_filter_name(self):", "= self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3, 4, 1]", "only one node extra is specified in extras_filter only this", "'transport_type': 'local', 'scheduler_type': 'slurm', }, { 'label': 'test4', 'hostname': 'test4.epfl.ch',", "\"\"\" expected_attribute = ['resources'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources' with self.app.test_client() as", "response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(len(response['data']['nodes']), 1) self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)", "self.assertIn(attr, received_attr) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid) ############### calculation attributes", "= response['data'].keys() for key in ['logs']: self.assertIn(key, expected_keys) expected_log_keys =", "'ssh', 'scheduler_type': 'torque', }, { 'label': 'test3', 'hostname': 'test3.epfl.ch', 'transport_type':", "for download format endpoint \"\"\" url = f'{self.get_url_prefix()}/nodes/download_formats' with self.app.test_client()", "give calculation incoming \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',", "scheduler_type, order it by \"name\" \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test(", "in ascending order \"\"\" 
RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True) def test_computers_orderby_id_asc_sign(self):", "def test_computers_list_page_perpage_exceed(self): \"\"\" no.of pages = total no. of computers", "offset parameter. It should return the no of rows specified", "f\"/computers?id>{str(node_pk)}&hostname=\\\"test1.epfl.ch\\\"\", expected_list_ids=[1] ) def test_computers_filter_mixed2(self): \"\"\" Add filter for the", "aiida.restapi.run_api import configure_api class RESTApiTestCase(AiidaTestCase): \"\"\" Setup of the tests", "= self.get_dummy_data()['structuredata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments' with self.app.test_client() as client: rv_obj", ") def test_computers_filter_mixed2(self): \"\"\" Add filter for the id, hostname", "test_computers_unknown_param(self): \"\"\" url parameters: id, limit and offset from aiida.common.exceptions", "[]: expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]] else: from aiida.common.exceptions import InputValidationError raise", "it is having same transport_type, order it by \"id\" \"\"\"", "attributes) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid) def test_contents_attributes_filter(self): \"\"\" Get", "page, limit and offset at same time, it would return", "cell = [[2., 0., 0.], [0., 2., 0.], [0., 0.,", "_LIMIT_DEFAULT = 400 @classmethod def setUpClass(cls, *args, **kwargs): # pylint:", "resource sharing headers \"\"\" url = f'{self.get_url_prefix()}/server' with self.app.test_client() as", "response['data']['ordering']: self.assertIn(prop, available_properties) def test_node_namespace(self): \"\"\" Test the rest api", "test_computers_list_page_perpage_exceed(self): \"\"\" no.of pages = total no. of computers in", "message for calcjob import logging from aiida.common.log import LOG_LEVEL_REPORT from", "self.app.test_client() as client: rv_obj = client.get(url) structure_data = load_node(node_uuid)._exportcontent('xsf')[0] #", "= ['extra2'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2' with self.app.test_client() as client: response_value", "self.assertIn(prop, available_properties) def test_node_namespace(self): \"\"\" Test the rest api call", "this! 
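    # A hypothetical convenience wrapper, added here only as a minimal sketch
    # (it is not used by the suite): every test below follows the same
    # pattern of opening a Werkzeug test client, issuing a GET under
    # `_url_prefix` and decoding the JSON body, e.g.
    # `RESTApiTestCase.fetch_json(self.app, '/api/v4/server')['data']`.
    @staticmethod
    def fetch_json(app, url):
        """Sketch of the GET-and-decode pattern used throughout the tests."""
        with app.test_client() as client:
            return json.loads(client.get(url).data)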
    @classmethod
    def process_dummy_data(cls):
        # pylint: disable=fixme
        """This function prepares typical responses from the RESTapi and puts them into class attributes."""
        # TODO: Storing the different nodes as lists and accessing them
        # by their list index is very fragile and a pain to debug.
        # Please change this!
        computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']
        computers = orm.QueryBuilder().append(
            orm.Computer, tag='comp', project=computer_projections
        ).order_by({'comp': [{'id': {'order': 'asc'}}]}).dict()

        # Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
        computers = [_['comp'] for _ in computers]
        for comp in computers:
            if comp['uuid'] is not None:
                comp['uuid'] = str(comp['uuid'])
        cls._dummy_data['computers'] = computers

        calculation_projections = ['id', 'uuid', 'user_id', 'node_type']
        calculations = orm.QueryBuilder().append(
            orm.CalculationNode, tag='calc', project=calculation_projections
        ).order_by({'calc': [{'id': {'order': 'desc'}}]}).dict()

        calculations = [_['calc'] for _ in calculations]
        for calc in calculations:
            if calc['uuid'] is not None:
                calc['uuid'] = str(calc['uuid'])
        cls._dummy_data['calculations'] = calculations

        data_projections = ['id', 'uuid', 'user_id', 'node_type']
        data_types = {
            'cifdata': orm.CifData,
            'parameterdata': orm.Dict,
            'structuredata': orm.StructureData,
            'data': orm.Data,
        }
        for label, dataclass in data_types.items():
            data = orm.QueryBuilder().append(
                dataclass, tag='data', project=data_projections
            ).order_by({'data': [{'id': {'order': 'desc'}}]}).dict()
            data = [_['data'] for _ in data]

            for datum in data:
                if datum['uuid'] is not None:
                    datum['uuid'] = str(datum['uuid'])
            cls._dummy_data[label] = data

    def split_path(self, url):  # pylint: disable=no-self-use
        """
        Split the url at "?" to get the url path and its parameters.

        :param url: Web url
        :return: url path and url parameters
        """
        parts = url.split('?')
        path = ''
        query_string = ''
        if parts:
            path = parts[0]
        if len(parts) > 1:
            query_string = parts[1]
        return path, query_string

    def compare_extra_response_data(self, node_type, url, response, uuid=None):
        """
        In the url response, we pass some extra information/data along with the
        node results, e.g. url method, node_type, path, pk, query_string, url,
        url_root etc.

        :param node_type: url requested for the type of the node
        :param url: web url
        :param response: url response
        :param uuid: url requested for the node pk
        """
        path, query_string = self.split_path(url)

        self.assertEqual(response['method'], 'GET')
        self.assertEqual(response['resource_type'], node_type)
        self.assertEqual(response['path'], path)
        self.assertEqual(response['id'], uuid)
        self.assertEqual(response['query_string'], query_string)
        self.assertEqual(response['url'], f'http://localhost{url}')
        self.assertEqual(response['url_root'], 'http://localhost/')
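    # For instance, a sketch of split_path's contract with a url exercised
    # below: everything before the first '?' is the path, the remainder is
    # the query string --
    #
    #   >>> '/computers?limit=2&offset=2'.split('?')
    #   ['/computers', 'limit=2&offset=2']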
    # node details and list with limit, offset, page, perpage
    def process_test(
        self,
        entity_type,
        url,
        full_list=False,
        empty_list=False,
        expected_list_ids=None,
        expected_range=None,
        expected_errormsg=None,
        uuid=None,
        result_node_type=None,
        result_name=None
    ):
        # pylint: disable=too-many-arguments
        """
        Check whether the response matches the expected values.

        :param entity_type: url requested for the type of the node
        :param url: web url
        :param full_list: if url is requested to get full list
        :param empty_list: if the response list is empty
        :param expected_list_ids: list of expected ids from data
        :param expected_range: [start, stop] range of expected ids from data
        :param expected_errormsg: expected error message in response
        :param uuid: url requested for the node pk
        :param result_node_type: node type in response data
        :param result_name: result name in response e.g. incoming, outgoing
        """
        if expected_list_ids is None:
            expected_list_ids = []

        if expected_range is None:
            expected_range = []

        if result_node_type is None and result_name is None:
            result_node_type = entity_type
            result_name = entity_type

        url = self._url_prefix + url

        with self.app.test_client() as client:
            rv_response = client.get(url)
            response = json.loads(rv_response.data)

            if expected_errormsg:
                self.assertEqual(response['message'], expected_errormsg)
            else:
                if full_list:
                    expected_data = self._dummy_data[result_node_type]
                elif empty_list:
                    expected_data = []
                elif expected_list_ids:
                    expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
                elif expected_range != []:
                    expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
                else:
                    from aiida.common.exceptions import InputValidationError
                    raise InputValidationError('Pass the expected range of the dummydata')

                expected_node_uuids = [node['uuid'] for node in expected_data]
                result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
                self.assertEqual(expected_node_uuids, result_node_uuids)

                self.compare_extra_response_data(entity_type, url, response, uuid)


class RESTApiTestSuite(RESTApiTestCase):
    # pylint: disable=too-many-public-methods
    """Define unittests for the rest api."""

    ############### generic endpoints ########################
    def test_server(self):
        """Test that the /server endpoint returns the AiiDA version."""
        url = f'{self.get_url_prefix()}/server'
        from aiida import __version__

        with self.app.test_client() as client:
            response = client.get(url)
            data = json.loads(response.data)['data']

        self.assertEqual(__version__, data['AiiDA_version'])
        self.assertEqual(self.get_url_prefix(), data['API_prefix'])

    def test_base_url(self):
        """Test that / returns the list of valid endpoints."""
        with self.app.test_client() as client:
            data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
            data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']

        self.assertTrue(len(data_base['available_endpoints']) > 0)
        self.assertDictEqual(data_base, data_server)

    def test_cors_headers(self):
        """Test that the REST API sets cross-origin resource sharing headers."""
        url = f'{self.get_url_prefix()}/server'
        with self.app.test_client() as client:
            response = client.get(url)
            headers = response.headers
            self.assertEqual(headers.get(ACL_ORIGIN), '*')
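    # Sketch of the response envelope these tests rely on, with the field
    # names asserted in compare_extra_response_data above (the payload under
    # "data" naturally depends on the database contents):
    #
    #   {"method": "GET", "resource_type": "computers",
    #    "path": "/api/v4/computers", "id": null,
    #    "query_string": "orderby=+id",
    #    "url": "http://localhost/api/v4/computers?orderby=+id",
    #    "url_root": "http://localhost/",
    #    "data": {"computers": [...]}}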
    ############### computers endpoint ########################
    def test_computers_details(self):
        """Request the details of a single computer."""
        node_uuid = self.get_dummy_data()['computers'][1]['uuid']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
        )

    def test_computers_list(self):
        """Get the full list of computers from the database."""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_list_limit_offset(self):
        """
        Get the list of computers from the database using the limit and offset parameters.
        It should return the number of rows specified in limit, starting from the row specified in offset.
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
        )

    def test_computers_list_limit_only(self):
        """
        Get the list of computers from the database using the limit parameter.
        It should return the number of rows specified in limit.
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])

    def test_computers_list_offset_only(self):
        """
        Get the list of computers from the database using the offset parameter.
        It should return all the rows starting from the row specified in offset.
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])

    def test_computers_list_limit_offset_perpage(self):
        """If we pass limit, offset and perpage at the same time, it returns an error message."""
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
        )

    def test_computers_list_page_limit_offset(self):
        """If we use page, limit and offset at the same time, it returns an error message."""
        expected_error = 'requesting a specific page is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
        )

    def test_complist_pagelimitoffset_perpage(self):
        """If we use page, limit, offset and perpage at the same time, it returns an error message."""
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self,
            'computers',
            '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
            expected_errormsg=expected_error
        )

    def test_computers_list_page_default(self):
        """
        It returns the number of rows defined by the default perpage option.

        no. of pages = total no. of computers in database / perpage
        "/page" acts as "/page/1?perpage=default_value"
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)

    def test_computers_list_page_perpage(self):
        """
        no. of pages = total no. of computers in database / perpage
        Using this formula it returns the rows for the requested page.
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
        )

    def test_computers_list_page_perpage_exceed(self):
        """
        no. of pages = total no. of computers in database / perpage
        If we request a page which exceeds the total number of pages, it returns an error message.
        """
        expected_error = 'Non existent page requested. The page range is [1 : ' \
                         '3]'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
        )
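    # Page-count sketch for the test above, assuming the four dummy computers
    # plus the default test-profile computer (five rows in total): the valid
    # page range [1 : 3] quoted in the error message is simply
    #
    #   >>> import math
    #   >>> math.ceil(5 / 2)   # total rows / perpage
    #   3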
    ############### list filters ########################
    def test_computers_filter_id1(self):
        """Add a filter on the id of the computer and get the filtered computer list (e.g. id=1)."""
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])

    def test_computers_filter_id2(self):
        """Add a filter on the id of the computer and get the filtered computer list (e.g. id > 1)."""
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
        )

    def test_computers_filter_pk(self):
        """Add a filter on the pk of the computer and get the filtered computer list (e.g. pk=1)."""
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])

    def test_computers_filter_name(self):
        """Add a filter for the name of the computer and get the filtered computer list."""
        RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])

    def test_computers_filter_hostname(self):
        """Add a filter for the hostname of the computer and get the filtered computer list."""
        RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])

    def test_computers_filter_transport_type(self):
        """Add a filter for the transport_type of the computer and get the filtered computer list."""
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
        )

    ############### list orderby ########################
    def test_computers_orderby_id_asc(self):
        """Return the computers list ordered by "id" in ascending order."""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)

    def test_computers_orderby_id_asc_sign(self):
        """Return the computers list ordered by "+id" in ascending order."""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_orderby_id_desc(self):
        """Return the computers list ordered by "id" in descending order."""
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])

    def test_computers_orderby_name_asc(self):
        """Return the computers list ordered by "name" in ascending order."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
        )

    def test_computers_orderby_name_asc_sign(self):
        """Return the computers list ordered by "+name" in ascending order."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
        )

    def test_computers_orderby_name_desc(self):
        """Return the computers list ordered by "name" in descending order."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
        )

    def test_computers_orderby_scheduler_type_asc(self):
        """Return the computers list ordered by "scheduler_type" in ascending order."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?transport_type="ssh"&pk>{str(node_pk)}&orderby=scheduler_type',
            expected_list_ids=[1, 4, 2]
        )

    def test_comp_orderby_scheduler_ascsign(self):
        """Return the computers list ordered by "+scheduler_type" in ascending order."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?transport_type="ssh"&pk>{str(node_pk)}&orderby=+scheduler_type',
            expected_list_ids=[1, 4, 2]
        )

    def test_computers_orderby_schedulertype_desc(self):
        """Return the computers list ordered by "scheduler_type" in descending order."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?pk>{str(node_pk)}&transport_type="ssh"&orderby=-scheduler_type',
            expected_list_ids=[2, 4, 1]
        )

    def test_computers_orderby_mixed1(self):
        """
        Return the computers list ordered first by "transport_type" in ascending
        order and, for equal transport_type, by "id".
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=transport_type,id', expected_list_ids=[3, 1, 2, 4]
        )

    def test_computers_orderby_mixed2(self):
        """
        Return the computers list ordered first by "scheduler_type" in descending
        order and, for equal scheduler_type, by "name".
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name',
            expected_list_ids=[2, 3, 4, 1]
        )

    def test_computers_orderby_mixed3(self):
        """
        Return the computers list ordered first by "scheduler_type" in descending
        order and, for equal scheduler_type, by "hostname" in descending order.

        Response::
            test4 slurm
            test3 slurm
            test2 torque
            test1 pbspro
            localhost pbspro

        Expected::
            test1 pbspro
            localhost pbspro
            test4 slurm
            test3 slurm
            test2 torque

        RESTApiTestCase.process_test(self, "computers",
                                     "/computers?orderby=+scheduler_type,-hostname",
                                     expected_list_ids=[1, 0, 4, 3, 2])
        """
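    # Multi-key ordering, sketched with plain stable sorts over the dummy
    # computers (rows 1..4 of the dummy data, row 0 being the default
    # computer): '-scheduler_type,name' means scheduler_type descending with
    # ties broken by name ascending, which is how test_computers_orderby_mixed2
    # above arrives at [2, 3, 4, 1] --
    #
    #   >>> comps = [('test1', 'pbspro'), ('test2', 'torque'),
    #   ...          ('test3', 'slurm'), ('test4', 'slurm')]
    #   >>> ordered = sorted(comps, key=lambda c: c[0])              # name asc
    #   >>> ordered = sorted(ordered, key=lambda c: c[1], reverse=True)  # stable
    #   >>> [name for name, _ in ordered]
    #   ['test2', 'test3', 'test4', 'test1']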
    ############### list filter combinations #######################
    def test_computers_filter_mixed1(self):
        """Add a filter for the hostname and id of the computer and get the filtered computer list."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&hostname="test1.epfl.ch"', expected_list_ids=[1]
        )

    def test_computers_filter_mixed2(self):
        """Add a filter for the id, hostname and transport_type of the computer and get the filtered computer list."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?id>{str(node_pk)}&hostname="test3.epfl.ch"&transport_type="ssh"',
            empty_list=True
        )

    ############### list all parameter combinations #######################
    def test_computers_mixed1(self):
        """url parameters: id, limit and offset."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
        )

    def test_computers_mixed2(self):
        """url parameters: id, page, perpage."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
        )

    def test_computers_mixed3(self):
        """url parameters: id, transport_type, orderby."""
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?id>{str(node_pk)}&transport_type="ssh"&orderby=-id&limit=2',
            expected_list_ids=[4, 2]
        )

    ############### list unknown url parameter ###########
    def test_computers_unknown_param(self):
        """
        url parameters: id, limit and offset

        from aiida.common.exceptions import InputValidationError
        RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
        """

    ############### calculation retrieved_inputs and retrieved_outputs #############
    def test_calculation_retrieved_inputs(self):
        """Get the list of the given calculation's retrieved_inputs."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])

    def test_calculation_retrieved_outputs(self):
        """Get the list of the given calculation's retrieved_outputs."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])

    ############### calculation incoming #############
    def test_calculation_inputs(self):
        """Get the list of the given calculation's incoming links."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        self.process_test(
            'nodes',
            f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
            expected_list_ids=[5, 3],
            uuid=node_uuid,
            result_node_type='data',
            result_name='incoming'
        )

    def test_calculation_input_filters(self):
        """Get the filtered incoming list for the given calculation."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        self.process_test(
            'nodes',
            f'/nodes/{str(node_uuid)}/links/incoming?node_type="data.dict.Dict."',
            expected_list_ids=[3],
            uuid=node_uuid,
            result_node_type='data',
            result_name='incoming'
        )

    def test_calculation_iotree(self):
        """Get the io tree (limited incoming and outgoing links) for the given calculation."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(len(response['data']['nodes']), 1)
            self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
            self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
            self.assertEqual(len(response['data']['metadata']), 1)
            expected_attr = [
                'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
            ]
            received_attr = response['data']['nodes'][0].keys()
            for attr in expected_attr:
                self.assertIn(attr, received_attr)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    ############### calculation attributes #############
    def test_calculation_attributes(self):
        """Get the list of calculation attributes."""
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1},
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], attributes)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    def test_contents_attributes_filter(self):
        """Get the list of calculation attributes with the attributes_filter."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter="attr1"'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)
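    # Selection sketch for the two endpoints above: '/contents/attributes'
    # returns the complete attributes dictionary, while attributes_filter
    # narrows it to the named keys (values as stored in setUpClass), e.g.
    #
    #   GET .../contents/attributes?attributes_filter="attr1"
    #   -> {"data": {"attributes": {"attr1": "OK"}}, ...}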
    ############### calculation node attributes filter #############
    def test_calculation_attributes_filter(self):
        """Get the list of the given calculation's attributes, filtered."""
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 1},
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)

    ############### calculation node extras_filter #############
    def test_calculation_extras_filter(self):
        """Get the list of the given calculation's extras, filtered."""
        extras = {'extra1': False, 'extra2': 'extra_info'}
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
            self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])

    ############### structure node attributes filter #############
    def test_structure_attributes_filter(self):
        """Get the list of the given structure's attributes, filtered."""
        cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)

    ############### node attributes_filter with pagination #############
    def test_node_attributes_filter_pagination(self):
        """
        Check that node attributes specified in attributes_filter are returned
        as a dictionary when pagination is set.
        """
        expected_attributes = ['resources', 'cell']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('attributes', node)
                self.assertNotIn('attributes.resources', node)
                self.assertNotIn('attributes.cell', node)
                self.assertEqual(len(node['attributes']), len(expected_attributes))
                for attr in expected_attributes:
                    self.assertIn(attr, node['attributes'])

    ############### node get one attributes_filter #############
    def test_node_single_attributes_filter(self):
        """
        Check that when only one node attribute is specified in attributes_filter,
        only this attribute is returned as a dictionary when pagination is set.
        """
        expected_attribute = ['resources']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['attributes'].keys()), expected_attribute)

    ############### node extras_filter with pagination #############
    def test_node_extras_filter_pagination(self):
        """
        Check that node extras specified in extras_filter are returned as a
        dictionary when pagination is set.
        """
        expected_extras = ['extra1', 'extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('extras', node)
                self.assertNotIn('extras.extra1', node)
                self.assertNotIn('extras.extra2', node)
                self.assertEqual(len(node['extras']), len(expected_extras))
                for extra in expected_extras:
                    self.assertIn(extra, node['extras'])

    ############### node get one extras_filter #############
    def test_node_single_extras_filter(self):
        """
        Check that when only one node extra is specified in extras_filter,
        only this extra is returned as a dictionary when pagination is set.
        """
        expected_extra = ['extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['extras'].keys()), expected_extra)

    ############### node full_type filter #############
    def test_nodes_full_type_filter(self):
        """Get the list of nodes filtered by full_type."""
        expected_node_uuids = []
        for calc in self.get_dummy_data()['calculations']:
            if calc['node_type'] == 'process.calculation.calcjob.CalcJobNode.':
                expected_node_uuids.append(calc['uuid'])

        url = f'{self.get_url_prefix()}/nodes/?full_type="process.calculation.calcjob.CalcJobNode.|"'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            for node in response['data']['nodes']:
                self.assertIn(node['uuid'], expected_node_uuids)

    ############### Structure visualization and download #############
    def test_structure_derived_properties(self):
        """Get the derived properties of a structure node."""
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(
                response['data']['derived_properties']['dimensionality'],
                {'dim': 3, 'value': 8.0, 'label': 'volume'}
            )
            self.assertEqual(response['data']['derived_properties']['formula'], 'Ba')
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    def test_structure_download(self):
        """Test the download of a structure file."""
        from aiida.orm import load_node
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=xsf'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
        structure_data = load_node(node_uuid)._exportcontent('xsf')[0]  # pylint: disable=protected-access
        self.assertEqual(rv_obj.data, structure_data)

    def test_cif(self):
        """Test the download of a cif file."""
        from aiida.orm import load_node
        node_uuid = self.get_dummy_data()['cifdata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
        cif = load_node(node_uuid)._prepare_cif()[0]  # pylint: disable=protected-access
        self.assertEqual(rv_obj.data, cif)

    ############### projectable_properties #############
    def test_projectable_properties(self):
        """Test the projectable_properties endpoint."""
        for nodetype in ['nodes', 'processes', 'computers', 'users', 'groups']:
            url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties'
            with self.app.test_client() as client:
                rv_obj = client.get(url)
                response = json.loads(rv_obj.data)
                self.assertNotIn('message', response)

                expected_keys = ['display_name', 'help_text', 'is_display', 'is_foreign_key', 'type']

                # check fields
                for _, pinfo in response['data']['fields'].items():
                    available_keys = pinfo.keys()
                    for prop in expected_keys:
                        self.assertIn(prop, available_keys)

                # check order
                available_properties = response['data']['fields'].keys()
                for prop in response['data']['ordering']:
                    self.assertIn(prop, available_properties)

    def test_node_namespace(self):
        """Test the rest api call to get the list of available node namespaces."""
        url = f'{self.get_url_prefix()}/nodes/full_types'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            expected_data_keys = ['path', 'namespace', 'subspaces', 'label', 'full_type']
            response_keys = response['data'].keys()
            for dkey in expected_data_keys:
                self.assertIn(dkey, response_keys)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response)

    def test_comments(self):
        """Get the node comments."""
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/comments'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)['data']['comments']
            all_comments = []
            for comment in response:
                all_comments.append(comment['message'])
            self.assertEqual(sorted(all_comments), sorted(['This is test comment.', 'Add another comment.']))

    def test_repo(self):
        """Test getting the repo list or repo file contents for a given node."""
        from aiida.orm import load_node

        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/list?filename="calcjob_inputs"'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}])

        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename="calcjob_inputs/aiida.in"'
        with self.app.test_client() as client:
            response_obj = client.get(url)
            input_file = load_node(node_uuid).get_object_content('calcjob_inputs/aiida.in', mode='rb')
            self.assertEqual(response_obj.data, input_file)

    def test_process_report(self):
        """Test the process report."""
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)

            expected_keys = response['data'].keys()
            for key in ['logs']:
                self.assertIn(key, expected_keys)

            expected_log_keys = response['data']['logs'][0].keys()
            for key in ['time', 'loggername', 'levelname', 'dbnode_id', 'message']:
                self.assertIn(key, expected_log_keys)

    def test_download_formats(self):
        """Test the download_formats endpoint."""
        url = f'{self.get_url_prefix()}/nodes/download_formats'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)

            for key in ['data.structure.StructureData.|', 'data.cif.CifData.|']:
                self.assertIn(key, response['data'].keys())
[]", "in expected_list_ids] elif expected_range != []: expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]] else:", "self.assertEqual(response['data']['repo_list'], [{'type': 'FILE', 'name': 'aiida.in'}]) url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\\\"calcjob_inputs/aiida.in\\\"\" with self.app.test_client()", "database \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True) def test_computers_list_limit_offset(self): \"\"\" Get", "node) self.assertNotIn('extras.extra2', node) self.assertEqual(len(node['extras']), len(expected_extras)) for extra in expected_extras: self.assertIn(extra,", "computer list (e.g. id=1) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers',", ") def test_computers_orderby_mixed3(self): \"\"\" Returns the computers list first order", "the CalcJobNode repository with tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_in) handle.flush() handle.seek(0)", "= orm.FolderData() # Add the calcjob_outputs folder with the aiida.out", "expected_keys) expected_log_keys = response['data']['logs'][0].keys() for key in ['time', 'loggername', 'levelname',", "expected_list_ids=[4, 2] ) ########## pass unknown url parameter ########### def", "and accessing them # by their list index is very", "database. \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2]) def test_computers_list_offset_only(self): \"\"\"", "pylint: disable=too-many-lines \"\"\"Unittests for REST API.\"\"\" import tempfile from flask_cors.core", "Get the list of given calculation retrieved_outputs \"\"\" node_uuid =", "is specified in attributes_filter only this attribute is returned as", "database using limit parameter. It should return the no of", "= orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({ 'comp': [{ 'id': { 'order': 'asc'", "for _ in calculations] for calc in calculations: if calc['uuid']", "client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) expected_keys = ['display_name', 'help_text',", "\"\"\" Test download of cif file \"\"\" from aiida.orm import", "parameter1.store() parameter2 = orm.Dict(dict={'c': 3, 'd': 4}) parameter2.store() kpoint =", "= self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/processes/{str(node_uuid)}/report' with self.app.test_client() as client: response_value", "def test_calculation_input_filters(self): \"\"\" Get filtered incoming list for given calculations", "test_node_single_attributes_filter(self): \"\"\" Check that when only one node attribute is", "get list of available node namespace \"\"\" url = f'{self.get_url_prefix()}/nodes/full_types'", "id, limit and offset from aiida.common.exceptions import InputValidationError RESTApiTestCase.node_exception(self, \"/computers?aa=bb&id=2\",", "node in response['data']['nodes']: self.assertIn(node['uuid'], expected_node_uuids) ############### Structure visualization and download", "projectable_properties ############# def test_projectable_properties(self): \"\"\" test projectable_properties endpoint \"\"\" for", ") def test_calculation_iotree(self): \"\"\" Get filtered incoming list for given", "'test4', 'hostname': 'test4.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'slurm', }] for dummy_computer", "would return the error message. 
\"\"\" expected_error = 'requesting a", "############### list filters ######################## def test_computers_filter_id1(self): \"\"\" Add filter on", "= [_['calc'] for _ in calculations] for calc in calculations:", "order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1,", "= self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1]) def test_computers_filter_name(self): \"\"\" Add", "{ 'num_machines': 1, 'num_mpiprocs_per_machine': 1 }, } node_uuid = self.get_dummy_data()['calculations'][1]['uuid']", "by \"+id\" in ascending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)", "'namespace', 'subspaces', 'label', 'full_type'] response_keys = response['data'].keys() for dkay in", "########################################################################### # pylint: disable=too-many-lines \"\"\"Unittests for REST API.\"\"\" import tempfile", "client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1']) self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2']) ############### structure", "test_repo(self): \"\"\" Test to get repo list or repo file", "repo file contents for given node \"\"\" from aiida.orm import", "the AiiDA RESTful-api \"\"\" _url_prefix = '/api/v4' _dummy_data = {}", "self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3, 4, 1] ) def test_computers_orderby_mixed3(self):", "RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1] ) def", "def test_computers_list_limit_offset_perpage(self): \"\"\" If we pass the limit, offset and", "= f\"{self.get_url_prefix()}/nodes/?full_type=\\\"process.calculation.calcjob.CalcJobNode.|\\\"\" with self.app.test_client() as client: rv_obj = client.get(url) response", "Using this formula it returns the no. 
of rows for", "by \"id\" in ascending order \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)", "returned as a dictionary when pagination is set \"\"\" expected_attributes", "LICENSE.txt file # # For further information please visit http://www.aiida.net", "key in ['logs']: self.assertIn(key, expected_keys) expected_log_keys = response['data']['logs'][0].keys() for key", "response, uuid) class RESTApiTestSuite(RESTApiTestCase): # pylint: disable=too-many-public-methods \"\"\" Define unittests", "aiida.common.log import LOG_LEVEL_REPORT from aiida.common.timezone import now from aiida.orm import", "self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1]) def test_computers_filter_name(self): \"\"\" Add filter", "f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2' with self.app.test_client() as client: response_value = client.get(url) response =", "is None: result_node_type = entity_type result_name = entity_type url =", "get the filtered computer list \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test(", "list of computers from database using offset parameter It should", "aiida.common.timezone import now from aiida.orm import Log log_record = {", "tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_in) handle.flush() handle.seek(0) calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True) calc.store()", "test_computers_details(self): \"\"\" Requests the details of single computer \"\"\" node_uuid", "url, url_root, etc. :param node_type: url requested fot the type", "def test_computers_list_offset_only(self): \"\"\" Get the list of computers from database", "is [1 : ' \\ '3]' RESTApiTestCase.process_test( self, 'computers', '/computers/page/4?perpage=2&orderby=+id',", "self.assertEqual(response['data']['attributes'], attributes) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid) def test_contents_attributes_filter(self): \"\"\"", "with pagination ############# def test_node_single_attributes_filter(self): \"\"\" Check that when only", "\"\"\" Check that when only one node extra is specified", "node_uuid = self.get_dummy_data()['computers'][1]['uuid'] RESTApiTestCase.process_test( self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid )", "client: rv_obj = client.get(url) response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual(response['data']['attributes'],", "expected_list_ids = [] if expected_range is None: expected_range = []", "f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/derived_properties' with self.app.test_client() as client: rv_obj = client.get(url) response =", "1, 0]) def test_computers_orderby_name_asc(self): \"\"\" Returns the computers list ordered", "'local', 'scheduler_type': 'slurm', }, { 'label': 'test4', 'hostname': 'test4.epfl.ch', 'transport_type':", "extra information/data along with the node results. e.g. 
url method,", "information please visit http://www.aiida.net # ########################################################################### # pylint: disable=too-many-lines \"\"\"Unittests", "self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1']) self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2']) ############### structure node attributes filter #############", "self, 'computers', f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=scheduler_type\", expected_list_ids=[1, 4, 2] ) def test_comp_orderby_scheduler_ascsign(self): \"\"\"", "self, entity_type, url, full_list=False, empty_list=False, expected_list_ids=None, expected_range=None, expected_errormsg=None, uuid=None, result_node_type=None,", "str(datum['uuid']) cls._dummy_data[label] = data def split_path(self, url): # pylint: disable=no-self-use", "full list :param empty_list: if the response list is empty", "test_computers_filter_name(self): \"\"\" Add filter for the name of computer and", "a string (e.g. in sqlalchemy it comes as a UUID", "calculations: if calc['uuid'] is not None: calc['uuid'] = str(calc['uuid']) cls._dummy_data['calculations']", "that /server endpoint returns AiiDA version \"\"\" url = f'{self.get_url_prefix()}/server'", "data = [_['data'] for _ in data] for datum in", "now(), 'loggername': 'loggername', 'levelname': logging.getLevelName(LOG_LEVEL_REPORT), 'dbnode_id': calc.id, 'message': 'This is", "1, 'b': 2}) parameter1.store() parameter2 = orm.Dict(dict={'c': 3, 'd': 4})", "of the AiiDA code. # # # # The code", "full_list: if url is requested to get full list :param", "incompatible with ' \\ 'limit and offset' RESTApiTestCase.process_test( self, 'computers',", "\"\"\" if expected_list_ids is None: expected_list_ids = [] if expected_range", "torque test1 test4 RESTApiTestCase.process_test(self, \"computers\", \"/computers?orderby=+scheduler_type, -hostname\", expected_list_ids=[1,0,4,3,2]) \"\"\" ###############", "'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type'] computers = orm.QueryBuilder().append(orm.Computer, tag='comp', project=computer_projections).order_by({", "RESTApiTestCase.process_test( self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2] ) def test_computers_list_page_perpage_exceed(self): \"\"\"", "existent page requested. 
The page range is [1 : '", "is set \"\"\" expected_attributes = ['resources', 'cell'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'", "one node attribute is specified in attributes_filter only this attribute", "expected_errormsg=expected_error ) def test_complist_pagelimitoffset_perpage(self): \"\"\" If we use the page,", "}, } node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes' with self.app.test_client()", "should return the no of rows specified in limit from", "f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell' with self.app.test_client() as client: response_value = client.get(url) response =", "url.split('?') path = '' query_string = '' if parts: path", "for calcjob import logging from aiida.common.log import LOG_LEVEL_REPORT from aiida.common.timezone", "pk \"\"\" path, query_string = self.split_path(url) self.assertEqual(response['method'], 'GET') self.assertEqual(response['resource_type'], node_type)", "for prop in response['data']['ordering']: self.assertIn(prop, available_properties) def test_node_namespace(self): \"\"\" Test", "}, { 'label': 'test3', 'hostname': 'test3.epfl.ch', 'transport_type': 'local', 'scheduler_type': 'slurm',", "file contents for given node \"\"\" from aiida.orm import load_node", "\"\"\" url parameters: id, transport_type, orderby \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id']", "as client: response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)", "list filter combinations ####################### def test_computers_filter_mixed1(self): \"\"\" Add filter for", "then it would return the error message. 
\"\"\" expected_error =", "it comes as a UUID object) computers = [_['comp'] for", "'OK'}) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid) ############### calculation node attributes", "calculations \"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] self.process_test( 'nodes', f\"/nodes/{str(node_uuid)}/links/incoming?node_type=\\\"data.dict.Dict.\\\"\", expected_list_ids=[3], uuid=node_uuid,", "node namespace \"\"\" url = f'{self.get_url_prefix()}/nodes/full_types' with self.app.test_client() as client:", "self.app.test_client() as client: response_value = client.get(url) response = json.loads(response_value.data) for", "['id', 'uuid', 'user_id', 'node_type'] calculations = orm.QueryBuilder().append(orm.CalculationNode, tag='calc', project=calculation_projections).order_by({ 'calc':", "\"\"\" cell = [[2., 0., 0.], [0., 2., 0.], [0.,", "test_computers_filter_mixed2(self): \"\"\" Add filter for the id, hostname and transport_type", "Get the list of given calculation attributes filtered \"\"\" attributes", "response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['attributes'], attributes) ############### calculation node extras_filter #############", "url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files' with self.app.test_client() as client: response_value = client.get(url)", "'test2.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'torque', }, { 'label': 'test3', 'hostname':", "'metadata': { 'content': 'test' }, } Log(**log_record) aiida_out = 'The", "test_calculation_attributes(self): \"\"\" Get list of calculation attributes \"\"\" attributes =", "def test_calculation_retrieved_inputs(self): \"\"\" Get the list of given calculation retrieved_inputs", "{ 'label': 'test2', 'hostname': 'test2.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'torque', },", "'3]' RESTApiTestCase.process_test( self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error ) ############### list filters", "from flask_cors.core import ACL_ORIGIN from aiida import orm from aiida.backends.testbase", "response['data']['nodes']: self.assertEqual(list(node['attributes'].keys()), expected_attribute) ############### node extras_filter with pagination ############# def", "order available_properties = response['data']['fields'].keys() for prop in response['data']['ordering']: self.assertIn(prop, available_properties)", "############### Structure visualization and download ############# def test_structure_derived_properties(self): \"\"\" Get", "self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell) ############### node attributes_filter with pagination ############# def test_node_attributes_filter_pagination(self):", "'FILE', 'name': 'aiida.in'}]) url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\\\"calcjob_inputs/aiida.in\\\"\" with self.app.test_client() as client:", "self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error ) ############### list filters ######################## def", ":param entity_type: url requested for the type of the node", "Returns the computers list ordered by \"id\" in descending order", "= orm.Dict(dict={'c': 3, 'd': 4}) parameter2.store() kpoint = orm.KpointsData() kpoint.set_kpoints_mesh([4,", "filter ############# def test_structure_attributes_filter(self): \"\"\" Get the list of given", "rows for requested page \"\"\" RESTApiTestCase.process_test( self, 
'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None,", "############# def test_node_single_extras_filter(self): \"\"\" Check that when only one node", "download of structure file \"\"\" from aiida.orm import load_node node_uuid", "extra in expected_extras: self.assertIn(extra, node['extras']) ############### node get one extras_filter", "Returns the computers list first order by \"scheduler_type\" in ascending", "{ 'cifdata': orm.CifData, 'parameterdata': orm.Dict, 'structuredata': orm.StructureData, 'data': orm.Data, }", "\"\"\" node_uuid = self.get_dummy_data()['calculations'][1]['uuid'] url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\\\"attr1\\\"\" with self.app.test_client() as", "tempfile.NamedTemporaryFile(mode='w+') as handle: handle.write(aiida_out) handle.flush() handle.seek(0) retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True) retrieved_outputs.store()", "no.of pages = total no. of computers in database /", "with limit and offset' RESTApiTestCase.process_test( self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error )", "url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1' with self.app.test_client() as client: response_value = client.get(url)", "the total no. of pages then it would return the", "the limit, offset and perpage at same time, it would", "orderby combinations ####################### def test_computers_orderby_mixed1(self): \"\"\" Returns the computers list", "slurm test3 slurm test2 torque test1 test4 RESTApiTestCase.process_test(self, \"computers\", \"/computers?orderby=+scheduler_type,", "'test3.epfl.ch', 'transport_type': 'local', 'scheduler_type': 'slurm', }, { 'label': 'test4', 'hostname':", "client.get(url) headers = response.headers self.assertEqual(headers.get(ACL_ORIGIN), '*') ############### computers endpoint ########################", "is specified in extras_filter only this extra is returned as", "url, response, uuid=node_uuid) def test_contents_attributes_filter(self): \"\"\" Get list of calculation", "# Add the calcjob_outputs folder with the aiida.out file to", "= f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter=\\\"attr1\\\"\" with self.app.test_client() as client: rv_obj = client.get(url) response", "def test_computers_list_page_perpage(self): \"\"\" no.of pages = total no. 
of computers", "########### def test_computers_unknown_param(self): \"\"\" url parameters: id, limit and offset", "test_computers_filter_pk(self): \"\"\" Add filter on the id of computer and", "Get the list of given calculation extras filtered \"\"\" extras", "the aiida.in file to the CalcJobNode repository with tempfile.NamedTemporaryFile(mode='w+') as", "extras['extra2']) ############### structure node attributes filter ############# def test_structure_attributes_filter(self): \"\"\"", "response, we pass some extra information/data along with the node", "that / returns list of endpoints \"\"\" with self.app.test_client() as", "rest api call to get list of available node namespace", "file \"\"\" from aiida.orm import load_node node_uuid = self.get_dummy_data()['structuredata'][0]['uuid'] url", "2., 0.), (0., 0., 2.)) structure = orm.StructureData(cell=cell) structure.append_atom(position=(0., 0.,", "orm.Computer(**dummy_computer) computer.store() # Prepare typical REST responses cls.process_dummy_data() def get_dummy_data(self):", "filtered \"\"\" attributes = { 'attr1': 'OK', 'attr2': 'OK', 'resources':", "cif.store() parameter1 = orm.Dict(dict={'a': 1, 'b': 2}) parameter1.store() parameter2 =", "if expected_list_ids is None: expected_list_ids = [] if expected_range is", "Check that when only one node attribute is specified in", "f\"{self.get_url_prefix()}/nodes/?full_type=\\\"process.calculation.calcjob.CalcJobNode.|\\\"\" with self.app.test_client() as client: rv_obj = client.get(url) response =", ") def test_computers_list(self): \"\"\" Get the full list of computers", "with self.app.test_client() as client: data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data'] data_server", "'scheduler_type': 'pbspro', }, { 'label': 'test2', 'hostname': 'test2.epfl.ch', 'transport_type': 'ssh',", "for REST API.\"\"\" import tempfile from flask_cors.core import ACL_ORIGIN from", "# pylint: disable=too-many-arguments \"\"\" Check whether response matches expected values.", "node) self.assertEqual(len(node['attributes']), len(expected_attributes)) for attr in expected_attributes: self.assertIn(attr, node['attributes']) ###############", "expected ids from data :param expected_errormsg: expected error message in", "f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files' with self.app.test_client() as client: response_value = client.get(url) response =", "for _ in data] for datum in data: if datum['uuid']", "'message': 'This is a template record message', 'metadata': { 'content':", "def test_computers_orderby_mixed2(self): \"\"\" Returns the computers list first order by", "response = json.loads(rv_obj.data) self.assertNotIn('message', response) self.assertEqual( response['data']['derived_properties']['dimensionality'], { 'dim': 3,", "}).dict() calculations = [_['calc'] for _ in calculations] for calc", "code. # # # # The code is hosted on", "computers endpoint ######################## def test_computers_details(self): \"\"\" Requests the details of", "self.app.test_client() as client: response_value = client.get(url) response = json.loads(response_value.data) expected_keys", "use the page, limit and offset at same time, it", "\"\"\" Test that / returns list of endpoints \"\"\" with", "self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4] ) def test_computers_mixed2(self): \"\"\"", "the no. 
specified in offset \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2,", "different requests/filters/orderings etc. \"\"\" super().setUpClass() api = configure_api(catch_internal_server=True) cls.app =", "= client.get(url) structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access self.assertEqual(rv_obj.data, structure_data)", "}] }).dict() data = [_['data'] for _ in data] for", "RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1]) def test_computers_filter_name(self): \"\"\" Add filter for", "f'http://localhost{url}') self.assertEqual(response['url_root'], 'http://localhost/') # node details and list with limit,", "test_computers_filter_mixed1(self): \"\"\" Add filter for the hostname and id of", "url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files' with self.app.test_client() as client: response_value = client.get(url)", "of the node :param url: web url :param full_list: if", "# # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core", "json.loads(response_value.data) self.assertNotEqual(len(response['data']['nodes']), 0) for node in response['data']['nodes']: self.assertEqual(list(node['extras'].keys()), expected_extra) ###############", "or repo file contents for given node \"\"\" from aiida.orm", "self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1' with self.app.test_client() as client: response_value =", "= client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1']) self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2']) ###############", "given calculation attributes filtered \"\"\" cell = [[2., 0., 0.],", "response_value = client.get(url) response = json.loads(response_value.data) self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1']) self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])", "get_url_prefix(self): return self._url_prefix @classmethod def process_dummy_data(cls): # pylint: disable=fixme \"\"\"", "computers list ordered by \"scheduler_type\" in descending order \"\"\" node_pk", "perpage If we request the page which exceeds the total", "= self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4] ) def", "returns list of endpoints \"\"\" with self.app.test_client() as client: data_base", "expected_errormsg=None, uuid=None, result_node_type=None, result_name=None ): # pylint: disable=too-many-arguments \"\"\" Check", "this extra is returned as a dictionary when pagination is", "client.get(url) cif = load_node(node_uuid)._prepare_cif()[0] # pylint: disable=protected-access self.assertEqual(rv_obj.data, cif) ###############", "\"\"\" Returns the computers list first order by \"scheduler_type\" in", "datum['uuid'] is not None: datum['uuid'] = str(datum['uuid']) cls._dummy_data[label] = data", "'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing' ] received_attr", "\"\"\" Returns the computers list ordered by \"+id\" in ascending", "} ) self.assertEqual(response['data']['derived_properties']['formula'], 'Ba') RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, 
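
# Illustrative sketch, not part of the original test module: the bare
# request/decode pattern that every test below builds on, factored out as a
# standalone helper. The '/computers?limit=1' endpoint is an arbitrary
# example; 'app' is the Flask app configured in RESTApiTestCase.setUpClass.
def _example_rest_call(app, url_prefix):
    """Return the payload of a single REST call made with the test client."""
    with app.test_client() as client:
        response = json.loads(client.get(f'{url_prefix}/computers?limit=1').data)
    # every successful response wraps its payload under the 'data' key
    return response['data']
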
class RESTApiTestSuite(RESTApiTestCase):
    # pylint: disable=too-many-public-methods
    """
    Define unittests for rest api
    """

    ############### generic endpoints ########################
    def test_server(self):
        """
        Test that /server endpoint returns AiiDA version
        """
        url = f'{self.get_url_prefix()}/server'
        from aiida import __version__

        with self.app.test_client() as client:
            response = client.get(url)
            data = json.loads(response.data)['data']

        self.assertEqual(__version__, data['AiiDA_version'])
        self.assertEqual(self.get_url_prefix(), data['API_prefix'])

    def test_base_url(self):
        """
        Test that / returns the list of endpoints
        """
        with self.app.test_client() as client:
            data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
            data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']

        self.assertTrue(len(data_base['available_endpoints']) > 0)
        self.assertDictEqual(data_base, data_server)

    def test_cors_headers(self):
        """
        Test that the REST API sets cross-origin resource sharing headers
        """
        url = f'{self.get_url_prefix()}/computers'
        with self.app.test_client() as client:
            response = client.get(url)
            headers = response.headers
            self.assertEqual(headers.get(ACL_ORIGIN), '*')

    ############### computers endpoint ########################
    def test_computers_details(self):
        """
        Requests the details of a single computer
        """
        node_uuid = self.get_dummy_data()['computers'][1]['uuid']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
        )

    def test_computers_list(self):
        """
        Get the full list of computers from the database
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_list_limit_offset(self):
        """
        Get the list of computers from the database using the limit and
        offset parameters. It should return the number of rows specified in
        limit, starting from the row specified in offset
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
        )

    def test_computers_list_limit_only(self):
        """
        Get the list of computers from the database using the limit
        parameter. It should return the number of rows specified in limit
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])

    def test_computers_list_offset_only(self):
        """
        Get the list of computers from the database starting from the row
        specified in offset
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])

    def test_computers_list_limit_offset_perpage(self):
        """
        If we pass limit, offset and perpage at the same time, an error
        message is returned
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
        )

    def test_computers_list_page_limit_offset(self):
        """
        If we use page together with limit and offset, an error message is
        returned
        """
        expected_error = 'requesting a specific page is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error
        )

    def test_complist_pagelimitoffset_perpage(self):
        """
        If we use page, limit, offset and perpage at the same time, an
        error message is returned
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self,
            'computers',
            '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
            expected_errormsg=expected_error
        )

    def test_computers_list_page_default(self):
        """
        Returns the number of rows defined by the default perpage option.
        no. of pages = total no. of computers in database / perpage
        "/page" acts as "/page/1?perpage=default_value"
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)

    def test_computers_list_page_perpage(self):
        """
        no. of pages = total no. of computers in database / perpage
        Using this formula it returns the rows for the requested page
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
        )

    def test_computers_list_page_perpage_exceed(self):
        """
        no. of pages = total no. of computers in database / perpage
        If we request a page that exceeds the total number of pages, an
        error message is returned
        """
        expected_error = 'Non existent page requested. The page range is [1 : ' \
                         '3]'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
        )
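
    # A worked example of the pagination rule quoted in the docstrings above,
    # assuming the database holds the four dummy computers from setUpClass
    # plus the default computer created by the test base class (5 rows total):
    #
    #     pages = ceil(total_rows / perpage) = ceil(5 / 2) = 3
    #
    # which is exactly why '/computers/page/4?perpage=2' above must fail with
    # 'Non existent page requested. The page range is [1 : 3]'.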
of", "else: from aiida.common.exceptions import InputValidationError raise InputValidationError('Pass the expected range", "data :param expected_range: [start, stop] range of expected ids from", "repo list or repo file contents for given node \"\"\"", "= [{ 'label': 'test1', 'hostname': 'test1.epfl.ch', 'transport_type': 'ssh', 'scheduler_type': 'pbspro',", "(e.g. id=1) \"\"\" node_pk = self.get_dummy_data()['computers'][1]['id'] RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])", "not None: calc['uuid'] = str(calc['uuid']) cls._dummy_data['calculations'] = calculations data_projections =", "results. e.g. url method, node_type, path, pk, query_string, url, url_root,", "expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]] else: from aiida.common.exceptions import InputValidationError raise InputValidationError('Pass", "calc1.store() dummy_computers = [{ 'label': 'test1', 'hostname': 'test1.epfl.ch', 'transport_type': 'ssh',", "test_computers_mixed1(self): \"\"\" url parameters: id, limit and offset \"\"\" node_pk", "\"\"\" parts = url.split('?') path = '' query_string = ''", "parameter. It should return the no of rows specified in", "message', 'metadata': { 'content': 'test' }, } Log(**log_record) aiida_out =", "node get one attributes_filter with pagination ############# def test_node_single_attributes_filter(self): \"\"\"", "1, 2, 4] ) def test_computers_orderby_mixed2(self): \"\"\" Returns the computers", ") ############### list all parameter combinations ####################### def test_computers_mixed1(self): \"\"\"", "with self.app.test_client() as client: response_value = client.get(url) response = json.loads(response_value.data)", "expected_list_ids=[3] ) ############### list orderby ######################## def test_computers_orderby_id_asc(self): \"\"\" Returns", "'computers', 'users', 'groups']: url = f'{self.get_url_prefix()}/{nodetype}/projectable_properties' with self.app.test_client() as client:", "node pk :param result_node_type: node type in response data :param", "for the hostname of computer and get the filtered computer", "self.assertIn(prop, available_keys) # check order available_properties = response['data']['fields'].keys() for prop", "response_keys) RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response) def test_comments(self): \"\"\" Get the", "it would return the error message. 
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved.                     #
# This file is part of the AiiDA code.                                    #
#                                                                         #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file        #
# For further information please visit http://www.aiida.net               #
###########################################################################
# pylint: disable=too-many-lines
"""Unittests for REST API."""
import tempfile

from flask_cors.core import ACL_ORIGIN

from aiida import orm
from aiida.backends.testbase import AiidaTestCase
from aiida.common import json
from aiida.common.links import LinkType
from aiida.restapi.run_api import configure_api


class RESTApiTestCase(AiidaTestCase):
    """
    Setup of the tests for the AiiDA RESTful-api
    """
    _url_prefix = '/api/v4'
    _dummy_data = {}
    _PERPAGE_DEFAULT = 20
    _LIMIT_DEFAULT = 400

    @classmethod
    def setUpClass(cls, *args, **kwargs):  # pylint: disable=too-many-locals, too-many-statements
        """
        Add objects to the database for different requests/filters/orderings etc.
        """
        super().setUpClass()

        api = configure_api(catch_internal_server=True)
        cls.app = api.app
        cls.app.config['TESTING'] = True

        # create test inputs
        cell = ((2., 0., 0.), (0., 2., 0.), (0., 0., 2.))
        structure = orm.StructureData(cell=cell)
        structure.append_atom(position=(0., 0., 0.), symbols=['Ba'])
        structure.store()
        structure.add_comment('This is test comment.')
        structure.add_comment('Add another comment.')

        cif = orm.CifData(ase=structure.get_ase())
        cif.store()

        parameter1 = orm.Dict(dict={'a': 1, 'b': 2})
        parameter1.store()

        parameter2 = orm.Dict(dict={'c': 3, 'd': 4})
        parameter2.store()

        kpoint = orm.KpointsData()
        kpoint.set_kpoints_mesh([4, 4, 4])
        kpoint.store()

        resources = {'num_machines': 1, 'num_mpiprocs_per_machine': 1}

        calcfunc = orm.CalcFunctionNode(computer=cls.computer)
        calcfunc.store()

        calc = orm.CalcJobNode(computer=cls.computer)
        calc.set_option('resources', resources)
        calc.set_attribute('attr1', 'OK')
        calc.set_attribute('attr2', 'OK')
        calc.set_extra('extra1', False)
        calc.set_extra('extra2', 'extra_info')

        calc.add_incoming(structure, link_type=LinkType.INPUT_CALC, link_label='link_structure')
        calc.add_incoming(parameter1, link_type=LinkType.INPUT_CALC, link_label='link_parameter')

        aiida_in = 'The input file\nof the CalcJob node'
        # Add the calcjob_inputs folder with the aiida.in file to the CalcJobNode repository
        with tempfile.NamedTemporaryFile(mode='w+') as handle:
            handle.write(aiida_in)
            handle.flush()
            handle.seek(0)
            calc.put_object_from_filelike(handle, 'calcjob_inputs/aiida.in', force=True)
        calc.store()

        # create log message for calcjob
        import logging
        from aiida.common.log import LOG_LEVEL_REPORT
        from aiida.common.timezone import now
        from aiida.orm import Log

        log_record = {
            'time': now(),
            'loggername': 'loggername',
            'levelname': logging.getLevelName(LOG_LEVEL_REPORT),
            'dbnode_id': calc.id,
            'message': 'This is a template record message',
            'metadata': {'content': 'test'},
        }
        Log(**log_record)

        aiida_out = 'The output file\nof the CalcJob node'
        retrieved_outputs = orm.FolderData()
        # Add the calcjob_outputs folder with the aiida.out file to the FolderData node
        with tempfile.NamedTemporaryFile(mode='w+') as handle:
            handle.write(aiida_out)
            handle.flush()
            handle.seek(0)
            retrieved_outputs.put_object_from_filelike(handle, 'calcjob_outputs/aiida.out', force=True)
        retrieved_outputs.store()
        retrieved_outputs.add_incoming(calc, link_type=LinkType.CREATE, link_label='retrieved')

        kpoint.add_incoming(calc, link_type=LinkType.CREATE, link_label='create')

        calc1 = orm.CalcJobNode(computer=cls.computer)
        calc1.set_option('resources', resources)
        calc1.store()

        dummy_computers = [{
            'label': 'test1',
            'hostname': 'test1.epfl.ch',
            'transport_type': 'ssh',
            'scheduler_type': 'pbspro',
        }, {
            'label': 'test2',
            'hostname': 'test2.epfl.ch',
            'transport_type': 'ssh',
            'scheduler_type': 'torque',
        }, {
            'label': 'test3',
            'hostname': 'test3.epfl.ch',
            'transport_type': 'local',
            'scheduler_type': 'slurm',
        }, {
            'label': 'test4',
            'hostname': 'test4.epfl.ch',
            'transport_type': 'ssh',
            'scheduler_type': 'slurm',
        }]

        for dummy_computer in dummy_computers:
            computer = orm.Computer(**dummy_computer)
            computer.store()

        # Prepare typical REST responses
        cls.process_dummy_data()

    def get_dummy_data(self):
        return self._dummy_data

    def get_url_prefix(self):
        return self._url_prefix
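    # --- editor's note: hypothetical helper, not in the original suite ----------
    # The fixture above links one StructureData and one Dict into the CalcJobNode
    # through INPUT_CALC links, so walking the incoming side of a stored
    # CalcJobNode should find exactly those two nodes. A minimal sketch,
    # assuming a loaded test profile:
    @classmethod
    def _sketch_count_calc_inputs(cls):
        """Hypothetical sketch: count the input nodes of the stored CalcJobNodes."""
        builder = orm.QueryBuilder()
        builder.append(orm.CalcJobNode, tag='calc')
        builder.append(orm.Node, with_outgoing='calc')  # nodes with a link INTO the calc
        return builder.count()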
    @classmethod
    def process_dummy_data(cls):
        # pylint: disable=fixme
        """
        This function prepares atomic chunks of typical responses from the
        RESTapi and puts them into class attributes
        """
        # TODO: Storing the different nodes as lists and accessing them
        # by their list index is very fragile and a pain to debug.
        # Please change this!
        computer_projections = ['id', 'uuid', 'name', 'hostname', 'transport_type', 'scheduler_type']
        computers = orm.QueryBuilder().append(
            orm.Computer, tag='comp', project=computer_projections
        ).order_by({'comp': [{'id': {'order': 'asc'}}]}).dict()

        # Cast UUID into a string (e.g. in sqlalchemy it comes as a UUID object)
        computers = [_['comp'] for _ in computers]
        for comp in computers:
            if comp['uuid'] is not None:
                comp['uuid'] = str(comp['uuid'])
        cls._dummy_data['computers'] = computers

        calculation_projections = ['id', 'uuid', 'user_id', 'node_type']
        calculations = orm.QueryBuilder().append(
            orm.CalculationNode, tag='calc', project=calculation_projections
        ).order_by({'calc': [{'id': {'order': 'desc'}}]}).dict()

        calculations = [_['calc'] for _ in calculations]
        for calc in calculations:
            if calc['uuid'] is not None:
                calc['uuid'] = str(calc['uuid'])
        cls._dummy_data['calculations'] = calculations

        data_projections = ['id', 'uuid', 'user_id', 'node_type']
        data_types = {
            'cifdata': orm.CifData,
            'parameterdata': orm.Dict,
            'structuredata': orm.StructureData,
            'data': orm.Data,
        }
        for label, dataclass in data_types.items():
            data = orm.QueryBuilder().append(
                dataclass, tag='data', project=data_projections
            ).order_by({'data': [{'id': {'order': 'desc'}}]}).dict()
            data = [_['data'] for _ in data]

            for datum in data:
                if datum['uuid'] is not None:
                    datum['uuid'] = str(datum['uuid'])

            cls._dummy_data[label] = data

    def split_path(self, url):  # pylint: disable=no-self-use
        """
        Split the url with "?" to get the url path and its parameters
        :param url: Web url
        :return: url path and url parameters
        """
        parts = url.split('?')
        path = ''
        query_string = ''
        if parts:
            path = parts[0]
        if len(parts) > 1:
            query_string = parts[1]

        return path, query_string

    def compare_extra_response_data(self, node_type, url, response, uuid=None):
        """
        In the url response, we pass some extra information/data along with the
        node results, e.g. url method, node_type, path, pk, query_string, url,
        url_root, etc.

        :param node_type: url requested for the type of the node
        :param url: web url
        :param response: url response
        :param uuid: url requested for the node pk
        """
        path, query_string = self.split_path(url)

        self.assertEqual(response['method'], 'GET')
        self.assertEqual(response['resource_type'], node_type)
        self.assertEqual(response['path'], path)
        self.assertEqual(response['id'], uuid)
        self.assertEqual(response['query_string'], query_string)
        self.assertEqual(response['url'], f'http://localhost{url}')
        self.assertEqual(response['url_root'], 'http://localhost/')
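    # --- editor's note: hypothetical sketch, not in the original suite ----------
    # A minimal usage sketch for split_path above, with made-up values:
    def test_split_path_sketch(self):
        """Editor's sketch: split_path separates the path from the query string."""
        path, query = self.split_path('/computers?limit=2&offset=2')
        self.assertEqual(path, '/computers')
        self.assertEqual(query, 'limit=2&offset=2')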
    # node details and list with limit, offset, page, perpage
    def process_test(
        self,
        entity_type,
        url,
        full_list=False,
        empty_list=False,
        expected_list_ids=None,
        expected_range=None,
        expected_errormsg=None,
        uuid=None,
        result_node_type=None,
        result_name=None
    ):
        # pylint: disable=too-many-arguments
        """
        Check whether response matches expected values.

        :param entity_type: url requested for the type of the node
        :param url: web url
        :param full_list: if url is requested to get full list
        :param empty_list: if the response list is empty
        :param expected_list_ids: list of expected ids from data
        :param expected_range: [start, stop] range of expected ids from data
        :param expected_errormsg: expected error message in response
        :param uuid: url requested for the node pk
        :param result_node_type: node type in response data
        :param result_name: result name in response e.g. incoming, outgoing
        """
        if expected_list_ids is None:
            expected_list_ids = []

        if expected_range is None:
            expected_range = []

        if result_node_type is None and result_name is None:
            result_node_type = entity_type
            result_name = entity_type

        url = self._url_prefix + url

        with self.app.test_client() as client:
            rv_response = client.get(url)
            response = json.loads(rv_response.data)

            if expected_errormsg:
                self.assertEqual(response['message'], expected_errormsg)
            else:
                if full_list:
                    expected_data = self._dummy_data[result_node_type]
                elif empty_list:
                    expected_data = []
                elif expected_list_ids:
                    expected_data = [self._dummy_data[result_node_type][i] for i in expected_list_ids]
                elif expected_range != []:
                    expected_data = self._dummy_data[result_node_type][expected_range[0]:expected_range[1]]
                else:
                    from aiida.common.exceptions import InputValidationError
                    raise InputValidationError('Pass the expected range of the dummydata')

                expected_node_uuids = [node['uuid'] for node in expected_data]
                result_node_uuids = [node['uuid'] for node in response['data'][result_name]]
                self.assertEqual(expected_node_uuids, result_node_uuids)

                self.compare_extra_response_data(entity_type, url, response, uuid)
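# --- editor's note: hypothetical sketch, not part of the original module --------
# process_test's expected_range mirrors plain Python slicing over the ordered
# dummy data, so [None, 2] means "first two rows" and [2, None] "offset 2 on":
_SKETCH_ROWS = ['c0', 'c1', 'c2', 'c3', 'c4']
assert _SKETCH_ROWS[None:2] == ['c0', 'c1']        # expected_range=[None, 2]
assert _SKETCH_ROWS[2:None] == ['c2', 'c3', 'c4']  # expected_range=[2, None]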
class RESTApiTestSuite(RESTApiTestCase):
    # pylint: disable=too-many-public-methods
    """
    Define unittests for rest api
    """

    ############### generic endpoints ########################
    def test_server(self):
        """
        Test that /server endpoint returns AiiDA version
        """
        url = f'{self.get_url_prefix()}/server'
        from aiida import __version__

        with self.app.test_client() as client:
            response = client.get(url)
            data = json.loads(response.data)['data']

        self.assertEqual(__version__, data['AiiDA_version'])
        self.assertEqual(self.get_url_prefix(), data['API_prefix'])

    def test_base_url(self):
        """
        Test that / returns the list of endpoints
        """
        with self.app.test_client() as client:
            data_base = json.loads(client.get(self.get_url_prefix() + '/').data)['data']
            data_server = json.loads(client.get(self.get_url_prefix() + '/server/endpoints').data)['data']

        self.assertTrue(len(data_base['available_endpoints']) > 0)
        self.assertDictEqual(data_base, data_server)

    def test_cors_headers(self):
        """
        Test that REST API sets cross-origin resource sharing headers
        """
        url = f'{self.get_url_prefix()}/server'
        with self.app.test_client() as client:
            response = client.get(url)
            headers = response.headers
            self.assertEqual(headers.get(ACL_ORIGIN), '*')

    ############### computers endpoint ########################
    def test_computers_details(self):
        """
        Requests the details of a single computer
        """
        node_uuid = self.get_dummy_data()['computers'][1]['uuid']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/{str(node_uuid)}', expected_list_ids=[1], uuid=node_uuid
        )

    def test_computers_list(self):
        """
        Get the full list of computers from database
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_list_limit_offset(self):
        """
        Get the list of computers from database using limit and offset
        parameters. It should return the number of rows specified in limit,
        starting from the row specified in offset.
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]
        )

    def test_computers_list_limit_only(self):
        """
        Get the list of computers from database using the limit parameter.
        It should return the number of rows specified in limit.
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?limit=2&orderby=+id', expected_range=[None, 2])

    def test_computers_list_offset_only(self):
        """
        Get the list of computers from database using the offset parameter.
        It should return all the rows starting from the row specified in
        offset.
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?offset=2&orderby=+id', expected_range=[2, None])

    def test_computers_list_limit_offset_perpage(self):
        """
        If we pass limit, offset and perpage at the same time, the API
        returns an error message.
        """
        expected_error = 'perpage key is incompatible with limit and offset'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?offset=2&limit=1&perpage=2&orderby=+id', expected_errormsg=expected_error
        )

    def test_computers_list_page_limit_offset(self):
        """
        If we use page together with limit and offset, the API returns an
        error message.
        """
        expected_error = 'requesting a specific page is incompatible with ' \
                         'limit and offset'
        RESTApiTestCase.process_test(
            self,
            'computers',
            '/computers/page/2?offset=2&limit=1&perpage=2&orderby=+id',
            expected_errormsg=expected_error
        )

    def test_computers_list_page_default(self):
        """
        "/page" returns the number of rows defined by the default perpage
        option. No. of pages = total no. of computers in database / perpage;
        "/page" acts as "/page/1?perpage=default_value".
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True)

    def test_computers_list_page_perpage(self):
        """
        No. of pages = total no. of computers in database / perpage.
        Using this formula it returns the rows for the requested page.
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/1?perpage=2&orderby=+id', expected_range=[None, 2]
        )

    def test_computers_list_page_perpage_exceed(self):
        """
        No. of pages = total no. of computers in database / perpage.
        If we request a page which exceeds the total number of pages, the
        API returns an error message.
        """
        expected_error = 'Non existent page requested. The page range is [1 : ' \
                         '3]'
        RESTApiTestCase.process_test(
            self, 'computers', '/computers/page/4?perpage=2&orderby=+id', expected_errormsg=expected_error
        )
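    # --- editor's note: hypothetical sketch, not in the original suite ----------
    # The page arithmetic behind the 'Non existent page requested' test above:
    # assuming the localhost computer from the base class plus the four dummy
    # computers, 5 computers at perpage=2 give ceil(5 / 2) = 3 pages, matching
    # the '[1 : 3]' range in the error message.
    def test_page_count_sketch(self):
        """Editor's sketch: page-range arithmetic used by the pagination tests."""
        import math
        total_computers, perpage = 5, 2
        self.assertEqual(math.ceil(total_computers / perpage), 3)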
    ############### list orderby ########################
    def test_computers_orderby_id_asc(self):
        """
        Returns the computers list ordered by "id" in ascending order
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=id', full_list=True)

    def test_computers_orderby_id_asc_sign(self):
        """
        Returns the computers list ordered by "+id" in ascending order
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=+id', full_list=True)

    def test_computers_orderby_id_desc(self):
        """
        Returns the computers list ordered by "id" in descending order
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?orderby=-id', expected_list_ids=[4, 3, 2, 1, 0])

    def test_computers_orderby_name_asc(self):
        """
        Returns the computers list ordered by "name" in ascending order
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=name', expected_list_ids=[1, 2, 3, 4]
        )

    def test_computers_orderby_name_asc_sign(self):
        """
        Returns the computers list ordered by "+name" in ascending order
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=+name', expected_list_ids=[1, 2, 3, 4]
        )

    def test_computers_orderby_name_desc(self):
        """
        Returns the computers list ordered by "name" in descending order
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1]
        )

    def test_computers_orderby_scheduler_type_asc(self):
        """
        Returns the computers list ordered by "scheduler_type" in ascending order
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?transport_type="ssh"&pk>{str(node_pk)}&orderby=scheduler_type',
            expected_list_ids=[1, 4, 2]
        )

    def test_comp_orderby_scheduler_ascsign(self):
        """
        Returns the computers list ordered by "+scheduler_type" in ascending order
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?transport_type="ssh"&pk>{str(node_pk)}&orderby=+scheduler_type',
            expected_list_ids=[1, 4, 2]
        )

    def test_computers_orderby_schedulertype_desc(self):
        """
        Returns the computers list ordered by "scheduler_type" in descending order
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?pk>{str(node_pk)}&transport_type="ssh"&orderby=-scheduler_type',
            expected_list_ids=[2, 4, 1]
        )

    ############### list orderby combinations #######################
    def test_computers_orderby_mixed1(self):
        """
        Returns the computers list first ordered by "transport_type" in
        ascending order and, for equal transport_type, ordered by "id"
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?pk>{str(node_pk)}&orderby=transport_type,id',
            expected_list_ids=[3, 1, 2, 4]
        )

    def test_computers_orderby_mixed2(self):
        """
        Returns the computers list first ordered by "scheduler_type" in
        descending order and, for equal scheduler_type, ordered by "name"
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name',
            expected_list_ids=[2, 3, 4, 1]
        )

    def test_computers_orderby_mixed3(self):
        """
        Returns the computers list first ordered by "scheduler_type" in
        ascending order and, for equal scheduler_type, ordered by "hostname"
        in descending order

        Response::
            test1 pbspro
            localhost pbspro
            test2 torque
            test4 slurm
            test3 slurm

        Expected::
            test1 pbspro
            localhost pbspro
            test2 torque
            test4 slurm
            test3 slurm

        RESTApiTestCase.process_test(self, "computers",
                                     "/computers?orderby=+scheduler_type,-hostname",
                                     expected_list_ids=[1, 0, 4, 3, 2])
        """

    ############### list filters ########################
    def test_computers_filter_id1(self):
        """
        Add filter on the id of computer and get the filtered computer
        list (e.g. id=1)
        """
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(self, 'computers', f'/computers?id={str(node_pk)}', expected_list_ids=[1])

    def test_computers_filter_id2(self):
        """
        Add filter on the id of computer and get the filtered computer
        list (e.g. id > 2)
        """
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&orderby=+id', expected_range=[2, None]
        )

    def test_computers_filter_pk(self):
        """
        Add filter on the pk of computer and get the filtered computer
        list (e.g. pk=1)
        """
        node_pk = self.get_dummy_data()['computers'][1]['id']
        RESTApiTestCase.process_test(self, 'computers', f'/computers?pk={str(node_pk)}', expected_list_ids=[1])

    def test_computers_filter_name(self):
        """
        Add filter for the name of computer and get the filtered computer list
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?name="test1"', expected_list_ids=[1])

    def test_computers_filter_hostname(self):
        """
        Add filter for the hostname of computer and get the filtered computer list
        """
        RESTApiTestCase.process_test(self, 'computers', '/computers?hostname="test1.epfl.ch"', expected_list_ids=[1])

    def test_computers_filter_transport_type(self):
        """
        Add filter for the transport_type of computer and get the filtered
        computer list
        """
        RESTApiTestCase.process_test(
            self, 'computers', '/computers?transport_type="local"&name="test3"&orderby=+id', expected_list_ids=[3]
        )
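    # --- editor's note: hypothetical sketch, not in the original suite ----------
    # The +/- orderby prefixes and the comma-separated multi-key orderings above
    # behave like a plain Python sort over a tuple key, descending when the key
    # is prefixed with '-':
    def test_orderby_tuple_key_sketch(self):
        """Editor's sketch: orderby=transport_type,id as a tuple sort."""
        rows = [
            {'id': 1, 'transport_type': 'ssh'},
            {'id': 3, 'transport_type': 'local'},
            {'id': 2, 'transport_type': 'ssh'},
        ]
        rows.sort(key=lambda row: (row['transport_type'], row['id']))
        self.assertEqual([row['id'] for row in rows], [3, 1, 2])  # 'local' first, then ssh by id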
    ############### list filter combinations #######################
    def test_computers_filter_mixed1(self):
        """
        Add filter for the hostname and id of computer and get the filtered
        computer list
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&hostname="test1.epfl.ch"', expected_list_ids=[1]
        )

    def test_computers_filter_mixed2(self):
        """
        Add filter for the id, hostname and transport_type of the computer
        and get the filtered computer list
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?id>{str(node_pk)}&hostname="test3.epfl.ch"&transport_type="ssh"',
            empty_list=True
        )

    ############### list all parameter combinations #######################
    def test_computers_mixed1(self):
        """
        url parameters: id, limit and offset
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers?id>{str(node_pk)}&limit=2&offset=3&orderby=+id', expected_list_ids=[4]
        )

    def test_computers_mixed2(self):
        """
        url parameters: id, page, perpage
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self, 'computers', f'/computers/page/2?id>{str(node_pk)}&perpage=2&orderby=+id', expected_list_ids=[3, 4]
        )

    def test_computers_mixed3(self):
        """
        url parameters: id, transport_type, orderby
        """
        node_pk = self.get_dummy_data()['computers'][0]['id']
        RESTApiTestCase.process_test(
            self,
            'computers',
            f'/computers?id>={str(node_pk)}&transport_type="ssh"&orderby=-id&limit=2',
            expected_list_ids=[4, 2]
        )

    ########## pass unknown url parameter ###########
    def test_computers_unknown_param(self):
        """
        url parameters: id, limit and offset

        from aiida.common.exceptions import InputValidationError
        RESTApiTestCase.node_exception(self, "/computers?aa=bb&id=2", InputValidationError)
        """
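    # --- editor's note: hypothetical sketch, not in the original suite ----------
    # test_computers_mixed2 above combines a filter with page/perpage; the row
    # window for page p is the slice [(p-1)*perpage : p*perpage] of the filtered,
    # ordered results:
    def test_page_window_sketch(self):
        """Editor's sketch: page/perpage windowing over filtered results."""
        filtered_indexes = [1, 2, 3, 4]  # dummy-data indexes surviving an id>... filter
        page, perpage = 2, 2
        window = filtered_indexes[(page - 1) * perpage:page * perpage]
        self.assertEqual(window, [3, 4])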
    ############### calculation retrieved_inputs and retrieved_outputs #############
    def test_calculation_retrieved_inputs(self):
        """
        Get the list of given calculation retrieved_inputs
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/input_files'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}])

    def test_calculation_retrieved_outputs(self):
        """
        Get the list of given calculation retrieved_outputs
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/calcjobs/{str(node_uuid)}/output_files'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data'], [{'name': 'calcjob_outputs', 'type': 'DIRECTORY'}])

    ############### calculation incoming #############
    def test_calculation_inputs(self):
        """
        Get the list of incoming nodes of a given calculation
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        self.process_test(
            'nodes',
            f'/nodes/{str(node_uuid)}/links/incoming?orderby=id',
            expected_list_ids=[5, 3],
            uuid=node_uuid,
            result_node_type='data',
            result_name='incoming'
        )

    def test_calculation_input_filters(self):
        """
        Get the filtered incoming list for given calculations
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        self.process_test(
            'nodes',
            f'/nodes/{str(node_uuid)}/links/incoming?node_type="data.dict.Dict."',
            expected_list_ids=[3],
            uuid=node_uuid,
            result_node_type='data',
            result_name='incoming'
        )

    def test_calculation_iotree(self):
        """
        Get the io tree of a given calculation, limited to one incoming and
        one outgoing node
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/links/tree?in_limit=1&out_limit=1'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(len(response['data']['nodes']), 1)
            self.assertEqual(len(response['data']['nodes'][0]['incoming']), 1)
            self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1)
            self.assertEqual(len(response['data']['metadata']), 1)
            expected_attr = [
                'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming', 'outgoing'
            ]
            received_attr = response['data']['nodes'][0].keys()
            for attr in expected_attr:
                self.assertIn(attr, received_attr)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    ############### calculation attributes #############
    def test_calculation_attributes(self):
        """
        Get list of calculation attributes
        """
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            },
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], attributes)
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    def test_contents_attributes_filter(self):
        """
        Get list of calculation attributes with the attributes_filter filter
        """
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes?attributes_filter="attr1"'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertNotIn('message', response)
            self.assertEqual(response['data']['attributes'], {'attr1': 'OK'})
            RESTApiTestCase.compare_extra_response_data(self, 'nodes', url, response, uuid=node_uuid)

    ############### calculation node attributes filter #############
    def test_calculation_attributes_filter(self):
        """
        Get the list of given calculation attributes filtered
        """
        attributes = {
            'attr1': 'OK',
            'attr2': 'OK',
            'resources': {
                'num_machines': 1,
                'num_mpiprocs_per_machine': 1
            },
        }
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['nodes'][0]['attributes'], attributes)

    ############### calculation node extras_filter #############
    def test_calculation_extras_filter(self):
        """
        Get the list of given calculation extras filtered
        """
        extras = {'extra1': False, 'extra2': 'extra_info'}
        node_uuid = self.get_dummy_data()['calculations'][1]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertEqual(response['data']['nodes'][0]['extras']['extra1'], extras['extra1'])
            self.assertEqual(response['data']['nodes'][0]['extras']['extra2'], extras['extra2'])

    ############### structure node attributes filter #############
    def test_structure_attributes_filter(self):
        """
        Get the list of given structure attributes filtered
        """
        cell = [[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]]
        node_uuid = self.get_dummy_data()['structuredata'][0]['uuid']
        url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true&attributes_filter=cell'
        with self.app.test_client() as client:
            rv_obj = client.get(url)
            response = json.loads(rv_obj.data)
            self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell)

    ############### node attributes_filter with pagination #############
    def test_node_attributes_filter_pagination(self):
        """
        Check that node attributes specified in attributes_filter are
        returned as a dictionary when pagination is set
        """
        expected_attributes = ['resources', 'cell']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources,cell'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('attributes', node)
                self.assertNotIn('attributes.resources', node)
                self.assertNotIn('attributes.cell', node)
                self.assertEqual(len(node['attributes']), len(expected_attributes))
                for attr in expected_attributes:
                    self.assertIn(attr, node['attributes'])

    ############### node get one attributes_filter with pagination #############
    def test_node_single_attributes_filter(self):
        """
        Check that when only one node attribute is specified in
        attributes_filter only this attribute is returned as a dictionary
        when pagination is set
        """
        expected_attribute = ['resources']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&attributes=true&attributes_filter=resources'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['attributes'].keys()), expected_attribute)

    ############### node extras_filter with pagination #############
    def test_node_extras_filter_pagination(self):
        """
        Check that node extras specified in extras_filter are returned as a
        dictionary when pagination is set
        """
        expected_extras = ['extra1', 'extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra1,extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertIn('extras', node)
                for extra in expected_extras:
                    self.assertIn(extra, node['extras'])

    ############### node get one extras_filter with pagination #############
    def test_node_single_extras_filter(self):
        """
        Check that when only one node extra is specified in extras_filter
        only this extra is returned as a dictionary when pagination is set
        """
        expected_extra = ['extra2']
        url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'
        with self.app.test_client() as client:
            response_value = client.get(url)
            response = json.loads(response_value.data)
            self.assertNotEqual(len(response['data']['nodes']), 0)
            for node in response['data']['nodes']:
                self.assertEqual(list(node['extras'].keys()), expected_extra)
\"\"\" expected_error", "specified in offset \"\"\" RESTApiTestCase.process_test( self, 'computers', '/computers?limit=2&offset=2&orderby=+id', expected_range=[2, 4]", "def test_calculation_attributes_filter(self): \"\"\" Get the list of given calculation attributes", "pagination is set \"\"\" expected_extra = ['extra2'] url = f'{self.get_url_prefix()}/nodes/page/1?perpage=10&extras=true&extras_filter=extra2'", "= self.get_dummy_data()['cifdata'][0]['uuid'] url = f'{self.get_url_prefix()}/nodes/{node_uuid}/download?download_format=cif' with self.app.test_client() as client: rv_obj", "node get one extras_filter with pagination ############# def test_node_single_extras_filter(self): \"\"\"", "database using offset parameter It should return all the rows", "/ perpage Using this formula it returns the no. of", "\"\"\" expected_error = 'requesting a specific page is incompatible with", "attributes_filter are returned as a dictionary when pagination is set", "in attributes_filter only this attribute is returned as a dictionary", "\"\"\" url parameters: id, limit and offset from aiida.common.exceptions import", "for key in ['logs']: self.assertIn(key, expected_keys) expected_log_keys = response['data']['logs'][0].keys() for", "expected_data] result_node_uuids = [node['uuid'] for node in response['data'][result_name]] self.assertEqual(expected_node_uuids, result_node_uuids)", "computers list ordered by \"+name\" in ascending order \"\"\" node_pk", "self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}]) def test_calculation_retrieved_outputs(self): \"\"\" Get the", "rv_obj = client.get(url) response = json.loads(rv_obj.data) self.assertEqual(response['data']['nodes'][0]['attributes']['cell'], cell) ############### node", "[{'type': 'FILE', 'name': 'aiida.in'}]) url = f\"{self.get_url_prefix()}/nodes/{str(node_uuid)}/repo/contents?filename=\\\"calcjob_inputs/aiida.in\\\"\" with self.app.test_client() as", "= json.loads(response.data)['data'] self.assertEqual(__version__, data['AiiDA_version']) self.assertEqual(self.get_url_prefix(), data['API_prefix']) def test_base_url(self): \"\"\" Test", "ascending order \"\"\" node_pk = self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f\"/computers?transport_type=\\\"ssh\\\"&pk>{str(node_pk)}&orderby=+scheduler_type\",", "is very fragile and a pain to debug. # Please", "url, response, uuid=None): \"\"\" In url response, we pass some", "orm.CalcJobNode(computer=cls.computer) calc1.set_option('resources', resources) calc1.store() dummy_computers = [{ 'label': 'test1', 'hostname':", "= [ 'ctime', 'mtime', 'id', 'node_label', 'node_type', 'uuid', 'description', 'incoming',", "API sets cross-origin resource sharing headers \"\"\" url = f'{self.get_url_prefix()}/server'", "for node in response['data']['nodes']: self.assertEqual(list(node['attributes'].keys()), expected_attribute) ############### node extras_filter with", "datum in data: if datum['uuid'] is not None: datum['uuid'] =", "expected_data = self._dummy_data[result_node_type] elif empty_list: expected_data = [] elif expected_list_ids:", "returns the no. 
of rows defined as default perpage option", "dummy_computers = [{ 'label': 'test1', 'hostname': 'test1.epfl.ch', 'transport_type': 'ssh', 'scheduler_type':", "with self.app.test_client() as client: response = client.get(url) data = json.loads(response.data)['data']", "f'{self.get_url_prefix()}/nodes/{str(node_uuid)}?attributes=true' with self.app.test_client() as client: response_value = client.get(url) response =", "1) self.assertEqual(len(response['data']['nodes'][0]['outgoing']), 1) self.assertEqual(len(response['data']['metadata']), 1) expected_attr = [ 'ctime', 'mtime',", "\"/page/1?perpage=default_value\" \"\"\" RESTApiTestCase.process_test(self, 'computers', '/computers/page?orderby=+id', full_list=True) def test_computers_list_page_perpage(self): \"\"\" no.of", "in response['data']['nodes']: self.assertEqual(list(node['extras'].keys()), expected_extra) ############### node full_type filter ############# def", "RESTApiTestCase.process_test( self, 'computers', '/computers/page/2?offset=2&limit=1&orderby=+id', expected_errormsg=expected_error ) def test_complist_pagelimitoffset_perpage(self): \"\"\" If", "endpoint \"\"\" url = f'{self.get_url_prefix()}/nodes/download_formats' with self.app.test_client() as client: response_value", ":param expected_list_ids: list of expected ids from data :param expected_range:", "def test_computers_filter_mixed2(self): \"\"\" Add filter for the id, hostname and", "a dictionary when pagination is set \"\"\" expected_attribute = ['resources']", "# Please change this! computer_projections = ['id', 'uuid', 'name', 'hostname',", "self.get_dummy_data()['calculations'][1]['uuid'] url = f'{self.get_url_prefix()}/nodes/{str(node_uuid)}/contents/attributes' with self.app.test_client() as client: rv_obj =", "response_value = client.get(url) response = json.loads(response_value.data) self.assertNotEqual(len(response['data']['nodes']), 0) for node", "comp['uuid'] = str(comp['uuid']) cls._dummy_data['computers'] = computers calculation_projections = ['id', 'uuid',", "for label, dataclass in data_types.items(): data = orm.QueryBuilder().append(dataclass, tag='data', project=data_projections).order_by({", "aiida_out = 'The output file\\nof the CalcJob node' retrieved_outputs =", "json.loads(response_value.data) self.assertEqual(response['data'], [{'name': 'calcjob_inputs', 'type': 'DIRECTORY'}]) def test_calculation_retrieved_outputs(self): \"\"\" Get", "is test comment.', 'Add another comment.'])) def test_repo(self): \"\"\" Test", "the no. specified in offset \"\"\" RESTApiTestCase.process_test( self, 'computers', '/computers?limit=2&offset=2&orderby=+id',", ") ############### list orderby combinations ####################### def test_computers_orderby_mixed1(self): \"\"\" Returns", "self.app.test_client() as client: rv_obj = client.get(url) response = json.loads(rv_obj.data) expected_data_keys", "API.\"\"\" import tempfile from flask_cors.core import ACL_ORIGIN from aiida import", "self.get_dummy_data()['computers'][0]['id'] RESTApiTestCase.process_test( self, 'computers', f'/computers?pk>{str(node_pk)}&orderby=-scheduler_type,name', expected_list_ids=[2, 3, 4, 1] )", "return the error message. 
\"\"\" expected_error = 'requesting a specific", "client: rv_obj = client.get(url) structure_data = load_node(node_uuid)._exportcontent('xsf')[0] # pylint: disable=protected-access", ") def test_computers_mixed3(self): \"\"\" url parameters: id, transport_type, orderby \"\"\"", "'computers', f'/computers?pk>{str(node_pk)}&orderby=-name', expected_list_ids=[4, 3, 2, 1] ) def test_computers_orderby_scheduler_type_asc(self): \"\"\"", "check order available_properties = response['data']['fields'].keys() for prop in response['data']['ordering']: self.assertIn(prop," ]
"""Process Monitor

Usage:
  processmonitor.py <process_name> <overall_duration> [<sampling_interval>]
  processmonitor.py -h|--help
  processmonitor.py -v|--version

Options:
  <process_name>       Process name argument.
  <overall_duration>   Overall duration of the monitoring in seconds.
  <sampling_interval>  Sampling interval in seconds (optional, default 5).
  -h --help            Show this screen.
  -v --version         Show version.
"""
from docopt import docopt
from utils import string_to_integer
from process import Process
from threading import Event, Thread
from datetime import datetime
import os
import sys
import csv
import time
from enum import IntEnum


class ExitStatus(IntEnum):
    OK = 0
    BAD_DURATION = 1
    BAD_INTERVAL = 2
    INTERVAL_GT_DURATION = 3


def call_repeatedly(interval, func, *args):
    stopped = Event()

    def loop():
        iteration = 1
        while not stopped.wait(interval - time.time() % interval):
            func(*args, iteration)
            iteration = iteration + 1

    Thread(target=loop).start()
    return stopped.set


def print_average():
    cpu_avg, mem_avg, files_avg = Process.metrics_average()
    if cpu_avg != None and mem_avg != None and files_avg != None:
        print(f"Metrics Avg.: %CPU: {cpu_avg}, MEMORY(B): {mem_avg}, OPEN FILES: {files_avg}")
        return True
    return False


def generate_report(name, duration, interval):
    if len(Process.metrics) == 0:
        return False
    ts = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    filename = f"{ts}_process-metrics-report_{name}_{duration}_{interval}.csv"
    with open(f"{filename}", mode='w') as report:
        writer = csv.writer(report, delimiter=',')
        writer.writerow(['ITERATION', '%CPU', 'MEMORY(B)', 'OPEN FILES'])
        iteration = 1
        for metric in Process.metrics:
            writer.writerow([iteration, metric.cpu, metric.mem, metric.files])
            iteration = iteration + 1
    reportpath = f"./{filename}"
    print(f"Metrics report: {reportpath}")
    return True


def raise_memory_leak_warning(name):
    if (Process.has_memory_leaks(name)):
        print(f"WARNING: possible memory leaks detected for process '{name}'")
        return True
    return False


def main():
    args = docopt(__doc__, version='Process Monitor 1.0')
    if not args['<sampling_interval>']:
        args['<sampling_interval>'] = 5
    name = args['<process_name>']
    try:
        duration = string_to_integer(args['<overall_duration>'])
    except:
        print("duration parameter is not an integer")
        return ExitStatus.BAD_DURATION
    try:
        interval = string_to_integer(args['<sampling_interval>'])
    except:
        print("interval parameter is not an integer")
        return ExitStatus.BAD_INTERVAL
    if interval > duration:
        print("interval parameter is greater than duration parameter")
        return ExitStatus.INTERVAL_GT_DURATION
    print("---------------------------------------------")
    print(" Process Monitor")
    print("---------------------------------------------")
    print(f"Monitoring process '{name}' every {interval} sec for {duration} sec")
    cancel_future_calls = call_repeatedly(interval, Process.monitor, name)
    time.sleep(duration)
    cancel_future_calls()
    print_average()
    generate_report(name, duration, interval)
    raise_memory_leak_warning(name)
    return ExitStatus.OK


def init():
    if __name__ == '__main__':
        if len(sys.argv) == 1:
            sys.argv.append('-h')
        sys.exit(main())


init()
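# ---------------------------------------------------------------------------
# The script above imports string_to_integer from a local utils module that is
# not shown here. A minimal sketch of what such a helper presumably does
# (assumption: it raises on non-integer input, which is exactly what the
# try/except blocks in main() rely on):
def string_to_integer(value):
    """Convert a CLI argument string to int; raises ValueError for non-integers."""
    return int(value)


# Assumed invocation, sampling 'myprocess' every 5 seconds for 60 seconds:
#
#   python processmonitor.py myprocess 60 5
#
# Note that call_repeatedly() waits for `interval - time.time() % interval`,
# which aligns every sample to wall-clock multiples of the interval rather
# than letting sleep drift accumulate across iterations.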
import os
import re

import numpy as np

from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import *


class DataLoader:
    # todo train/test/validation => (500/50/50 per class)
    def __init__(self):
        self.image_width = flags.FLAGS.image_width
        self.image_height = flags.FLAGS.image_height
        self.batch_size = flags.FLAGS.batch_size
        self.data_path = flags.FLAGS.data_path
        self.img_reg = re.compile(r'.*\.jpeg', re.IGNORECASE)
        self.init_class()
        self.init_annotation()

    def init_class(self):
        self.cls = {}
        for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))):
            self.cls[dir] = idx

    def init_annotation(self):
        self.anno = {}
        for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')):
            filename, label, *_ = line.split('\t')
            self.anno[filename] = label

    def init_train(self):
        train_x, train_y = [], []
        for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')):
            for file in files:
                if self.img_reg.match(file):
                    train_x.append(os.path.join(path, file))
                    train_y.append(self.cls[re.match(r'(.+)_\d+\.jpeg', file, re.IGNORECASE).group(1)])
        self.train_len = len(train_y)

        # todo train data random sort
        random_sort = np.random.permutation(self.train_len)
        train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort]

        # todo convert (Numpy / List) -> Tensor
        with tf.variable_scope(name_or_scope='data_tensor'):
            self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x')
            self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y')

    def init_validation(self):
        valid_x, valid_y = [], []
        for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')):
            for file in files:
                if self.img_reg.match(file):
                    valid_x.append(os.path.join(path, file))
                    valid_y.append(self.cls[self.anno[file]])
        self.valid_len = len(valid_y)

        # todo validation data random sort
        random_sort = np.random.permutation(self.valid_len)
        valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort]

        # todo convert (Numpy / List) -> Tensor
        with tf.variable_scope(name_or_scope='data_tensor'):
            self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x')
            self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y')

    def init_test(self):
        test_x = []
        for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'test')):
            for file in files:
                test_x.append(os.path.join(path, file))
        self.test_len = len(test_x)

        # todo convert (Numpy / List) -> Tensor
        with tf.variable_scope(name_or_scope='data_tensor'):
            self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x')

    def train_normal(self, x, y):
        with tf.variable_scope(name_or_scope='train_normal'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
            return x, y

    def train_random_crop(self, x, y):
        with tf.variable_scope(name_or_scope='train_random_crop'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            # pad H and W by 4 px each side (the tensor is a single HWC image)
            x = tf.pad(x, [[4, 4], [4, 4], [0, 0]], name='padding')
            # x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3))
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
            return x, y

    def valid_normal(self, x, y):
        with tf.variable_scope(name_or_scope='valid_normal'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
            return x, y

    def test_normal(self, x):
        with tf.variable_scope(name_or_scope='test_normal'):
            x = tf.read_file(filename=x)
            x = tf.image.decode_png(contents=x, channels=3, name='decode_png')
            x = tf.divide(tf.cast(x, tf.float32), 255.)
            x = tf.subtract(x, [0.4921, 0.4833, 0.4484])
            x = tf.divide(x, [0.2465, 0.2431, 0.2610])
            return x

    def dataset_batch_loader(self, dataset, ref_func, name):
        with tf.variable_scope(name_or_scope=name):
            dataset_map = dataset.map(ref_func).batch(self.batch_size)
            iterator = dataset_map.make_one_shot_iterator()
            batch_input = iterator.get_next()
            return batch_input

    def train_loader(self):
        with tf.variable_scope('train_loader'):
            '''
            repeat(): restarts the dataset from the beginning once the end is reached
            shuffle(): random-sorts the dataset (if the buffer size given in the
                       parentheses exceeds the total data count, the whole dataset
                       is shuffled)
            '''
            dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat()
            normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch')
            random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch')
            return normal_batch, random_crop_batch

    def valid_loader(self):
        with tf.variable_scope('valid_loader'):
            dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat()
            normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch')
            return normal_batch

    def test_loader(self):
        with tf.variable_scope('test_loader'):
            dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat()
            normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch')
            return normal_batch
x", "dtype=tf.string, name='valid_x') self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y') def init_test(self): test_x", "name='decode_png') x = tf.pad(x, [[0, 0], [4, 4], [4, 4],", "y def test_normal(self, x): with tf.variable_scope(name_or_scope='test_normal'): x = tf.read_file(filename=x) x", "repeat(): 데이터셋이 끝에 도달했을 때 다시 처음부터 수행하게 하는 함수", "(path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')): for file in files:", "= tf.pad(x, [[0, 0], [4, 4], [4, 4], [0, 0]],", "valid_loader(self): with tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat() normal_batch = self.dataset_batch_loader(dataset,", "4], [4, 4], [0, 0]], name='padding') # x = tf.image.resize_images(images=x,", "train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg', file, re.IGNORECASE).group(1)]) self.train_len = len(train_y) #todo train data random", "self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch') return normal_batch def test_loader(self): with tf.variable_scope('test_loader'): dataset", "file)) train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg', file, re.IGNORECASE).group(1)]) self.train_len = len(train_y) #todo train data", "전체 데이터에 대한 random sort) ''' dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat()", "name='normal_batch') random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch') return normal_batch, random_crop_batch def", "= tf.image.decode_png(contents=x, channels=3, name='decode_png') x = tf.pad(x, [[0, 0], [4,", "valid_normal(self, x, y): with tf.variable_scope(name_or_scope='valid_normal'): x = tf.read_file(filename=x) x =", "with tf.variable_scope(name_or_scope='valid_normal'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png')", "train/test/validation => (클래스 당 500/50/50) def __init__(self): self.image_width = flags.FLAGS.image_width", "len(train_y) #todo train data random sort random_sort = np.random.permutation(self.train_len) train_x,", "in os.walk(os.path.join(self.data_path, 'train')): for file in files: if self.img_reg.match(file): train_x.append(os.path.join(path,", "* class DataLoader: # todo train/test/validation => (클래스 당 500/50/50)", "self.anno = {} for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')): filename,", "tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch') return normal_batch def", "self.train_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop,", "import numpy as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import * class DataLoader:", "enumerate(os.listdir(os.path.join(self.data_path, 'train'))): self.cls[dir] = idx def init_annotation(self): self.anno = {}", "normal_batch, random_crop_batch def valid_loader(self): with tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat()", "y): with tf.variable_scope(name_or_scope='train_random_crop'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3,", "def init_validation(self): valid_x, valid_y = [], [] for (path, dirs,", "[0.2465, 0.2431, 0.2610]) return x, y def train_random_crop(self, x, y):", "= 
len(valid_y) #todo validataion data random sort random_sort = np.random.permutation(self.valid_len)", "for file in files: if self.img_reg.match(file): train_x.append(os.path.join(path, file)) train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg', file,", "in enumerate(os.listdir(os.path.join(self.data_path, 'train'))): self.cls[dir] = idx def init_annotation(self): self.anno =", "tf.variable_scope(name_or_scope='data_tensor'): self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x') def train_normal(self, x, y):", "import re import numpy as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import *", "tf.random_crop(value=x, size=(self.image_height, self.image_width, 3)) x = tf.divide(tf.cast(x, tf.float32), 255.) x", "def valid_loader(self): with tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat() normal_batch =", "flags.FLAGS.batch_size self.data_path = flags.FLAGS.data_path self.img_reg = re.compile('.*\\\\.jpeg', re.IGNORECASE) self.init_class() self.init_annotation()", "= flags.FLAGS.image_height self.batch_size = flags.FLAGS.batch_size self.data_path = flags.FLAGS.data_path self.img_reg =", "name='train_y') def init_validation(self): valid_x, valid_y = [], [] for (path,", "np.random.permutation(self.train_len) train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort] #todo (Numpy", "dtype=np.int64)[random_sort] #todo (Numpy / List) => Tensor 로 변환 with", "self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y') def init_test(self): test_x = []", "re import numpy as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import * class", "x = tf.pad(x, [[0, 0], [4, 4], [4, 4], [0,", "y): with tf.variable_scope(name_or_scope='valid_normal'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3,", "files) in os.walk(os.path.join(self.data_path, 'test')): for file in files: test_x.append(os.path.join(path, file))", "tf.variable_scope('test_loader'): dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat() normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch') return", "dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))): self.cls[dir] = idx def init_annotation(self): self.anno", "(Numpy / List) => Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.train_x", "[], [] for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')): for", "def train_normal(self, x, y): with tf.variable_scope(name_or_scope='train_normal'): x = tf.read_file(filename=x) x", "x def dataset_batch_loader(self, dataset, ref_func, name): with tf.variable_scope(name_or_scope=name): dataset_map =", "[4, 4], [0, 0]], name='padding') # x = tf.image.resize_images(images=x, size=(self.image_height+8,", "tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y') def init_validation(self): valid_x, valid_y = [], []", "return normal_batch, random_crop_batch def valid_loader(self): with tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x,", "with tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.valid_normal,", "with tf.variable_scope(name_or_scope='test_normal'): x = tf.read_file(filename=x) x = 
tf.image.decode_png(contents=x, channels=3, name='decode_png')", "init_validation(self): valid_x, valid_y = [], [] for (path, dirs, files)", "sort random_sort = np.random.permutation(self.valid_len) valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y,", "os import re import numpy as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import", "as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import * class DataLoader: # todo", "y): with tf.variable_scope(name_or_scope='train_normal'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3,", "train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort] #todo (Numpy / List)", "'test')): for file in files: test_x.append(os.path.join(path, file)) self.test_len = len(test_x)", "tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y') def init_test(self): test_x = [] for (path,", "with tf.variable_scope(name_or_scope='train_random_crop'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png')", "= tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch =", "file in files: if self.img_reg.match(file): train_x.append(os.path.join(path, file)) train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg', file, re.IGNORECASE).group(1)])", "= {} for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))): self.cls[dir] =", "전체 데이터 수보다 크면 전체 데이터에 대한 random sort) '''", "함수 shuffle(): 데이터셋에 대해 random sort 기능을 수행하는 함수 (괄호안에", "= iterator.get_next() return batch_input def train_loader(self): with tf.variable_scope('train_loader'): ''' repeat():", "[0.2465, 0.2431, 0.2610]) return x def dataset_batch_loader(self, dataset, ref_func, name):", "값이 전체 데이터 수보다 크면 전체 데이터에 대한 random sort)", "random_sort = np.random.permutation(self.valid_len) valid_x, valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort]", "self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch') return normal_batch,", "in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')): filename, label, *_ = line.split('\\t') self.anno[filename]", "= np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort] #todo (Numpy / List) ->", "= tf.subtract(x, [0.4921, 0.4833, 0.4484]) x = tf.divide(x, [0.2465, 0.2431,", "def init_train(self): train_x, train_y = [], [] for (path, dirs,", "iterator.get_next() return batch_input def train_loader(self): with tf.variable_scope('train_loader'): ''' repeat(): 데이터셋이", "def dataset_batch_loader(self, dataset, ref_func, name): with tf.variable_scope(name_or_scope=name): dataset_map = dataset.map(ref_func).batch(self.batch_size)", "x, y def train_random_crop(self, x, y): with tf.variable_scope(name_or_scope='train_random_crop'): x =", "도달했을 때 다시 처음부터 수행하게 하는 함수 shuffle(): 데이터셋에 대해", "0.2610]) return x, y def train_random_crop(self, x, y): with tf.variable_scope(name_or_scope='train_random_crop'):", "with tf.variable_scope('train_loader'): ''' repeat(): 데이터셋이 끝에 도달했을 때 다시 처음부터", "tf.divide(x, [0.2465, 0.2431, 0.2610]) return x def dataset_batch_loader(self, dataset, ref_func,", 
"open(os.path.join(self.data_path, 'val', 'val_annotations.txt')): filename, label, *_ = line.split('\\t') self.anno[filename] =", "valid_y = np.asarray(valid_x, dtype=np.string_)[random_sort], np.asarray(valid_y, dtype=np.int64)[random_sort] #todo (Numpy / List)", "self.valid_normal, name='normal_batch') return normal_batch def test_loader(self): with tf.variable_scope('test_loader'): dataset =", "import os import re import numpy as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants", "in os.walk(os.path.join(self.data_path, 'test')): for file in files: test_x.append(os.path.join(path, file)) self.test_len", "init_test(self): test_x = [] for (path, dirs, files) in os.walk(os.path.join(self.data_path,", "tf.variable_scope(name_or_scope='data_tensor'): self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x') self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64,", "test_loader(self): with tf.variable_scope('test_loader'): dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat() normal_batch = self.dataset_batch_loader(dataset, self.test_normal,", "= flags.FLAGS.batch_size self.data_path = flags.FLAGS.data_path self.img_reg = re.compile('.*\\\\.jpeg', re.IGNORECASE) self.init_class()", "(Numpy / List) -> Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.test_x", "tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3))", "method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3)) x = tf.divide(tf.cast(x,", "(path, dirs, files) in os.walk(os.path.join(self.data_path, 'train')): for file in files:", "/ List) -> Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.valid_x =", "tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x') def train_normal(self, x, y): with tf.variable_scope(name_or_scope='train_normal'): x", "random sort random_sort = np.random.permutation(self.train_len) train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort],", "dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch", "line.split('\\t') self.anno[filename] = label def init_train(self): train_x, train_y = [],", "init_train(self): train_x, train_y = [], [] for (path, dirs, files)", "train_normal(self, x, y): with tf.variable_scope(name_or_scope='train_normal'): x = tf.read_file(filename=x) x =", "normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch')", "files) in os.walk(os.path.join(self.data_path, 'train')): for file in files: if self.img_reg.match(file):", "name): with tf.variable_scope(name_or_scope=name): dataset_map = dataset.map(ref_func).batch(self.batch_size) iterator = dataset_map.make_one_shot_iterator() batch_input", "file in files: test_x.append(os.path.join(path, file)) self.test_len = len(test_x) #todo (Numpy", "= self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch') return normal_batch, random_crop_batch def valid_loader(self): with", "todo train/test/validation => (클래스 당 500/50/50) def __init__(self): self.image_width =", "데이터셋에 대해 random sort 기능을 수행하는 함수 
(괄호안에 값이 전체", "(Numpy / List) -> Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.valid_x", "dataset_map = dataset.map(ref_func).batch(self.batch_size) iterator = dataset_map.make_one_shot_iterator() batch_input = iterator.get_next() return", "수행하는 함수 (괄호안에 값이 전체 데이터 수보다 크면 전체 데이터에", "= np.random.permutation(self.train_len) train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort] #todo", "tf.divide(x, [0.2465, 0.2431, 0.2610]) return x, y def valid_normal(self, x,", "numpy as np from Projects.DeepLearningTechniques.MobileNet_v2.tiny_imagenet.constants import * class DataLoader: #", "= line.split('\\t') self.anno[filename] = label def init_train(self): train_x, train_y =", "= tf.divide(x, [0.2465, 0.2431, 0.2610]) return x, y def train_random_crop(self,", "init_class(self): self.cls = {} for idx, dir in enumerate(os.listdir(os.path.join(self.data_path, 'train'))):", "[], [] for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')): for", "with tf.variable_scope(name_or_scope='data_tensor'): self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x') def train_normal(self, x,", "x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) x = tf.random_crop(value=x, size=(self.image_height,", "tf.variable_scope(name_or_scope='data_tensor'): self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x') self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64,", "0.2431, 0.2610]) return x, y def test_normal(self, x): with tf.variable_scope(name_or_scope='test_normal'):", "= tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x') self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y') def", "name='normal_batch') return normal_batch def test_loader(self): with tf.variable_scope('test_loader'): dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat()", "255.) 
x = tf.subtract(x, [0.4921, 0.4833, 0.4484]) x = tf.divide(x,", "0.2431, 0.2610]) return x def dataset_batch_loader(self, dataset, ref_func, name): with", "os.walk(os.path.join(self.data_path, 'test')): for file in files: test_x.append(os.path.join(path, file)) self.test_len =", "def train_loader(self): with tf.variable_scope('train_loader'): ''' repeat(): 데이터셋이 끝에 도달했을 때", "for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')): filename, label, *_ =", "= self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch = self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch') return", "tf.variable_scope(name_or_scope='train_random_crop'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png') x", "List) -> Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.test_x = tf.convert_to_tensor(value=test_x,", "size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) x = tf.random_crop(value=x, size=(self.image_height, self.image_width, 3)) x", "def __init__(self): self.image_width = flags.FLAGS.image_width self.image_height = flags.FLAGS.image_height self.batch_size =", "0.2610]) return x, y def valid_normal(self, x, y): with tf.variable_scope(name_or_scope='valid_normal'):", "np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort] #todo (Numpy / List) => Tensor", "with tf.variable_scope('test_loader'): dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat() normal_batch = self.dataset_batch_loader(dataset, self.test_normal, name='normal_batch')", "dtype=tf.string, name='test_x') def train_normal(self, x, y): with tf.variable_scope(name_or_scope='train_normal'): x =", "= dataset.map(ref_func).batch(self.batch_size) iterator = dataset_map.make_one_shot_iterator() batch_input = iterator.get_next() return batch_input", "4], [0, 0]], name='padding') # x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8),", "[0.4921, 0.4833, 0.4484]) x = tf.divide(x, [0.2465, 0.2431, 0.2610]) return", "= tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y') def init_test(self): test_x = [] for", "데이터셋이 끝에 도달했을 때 다시 처음부터 수행하게 하는 함수 shuffle():", "random sort) ''' dataset = tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat() normal_batch = self.dataset_batch_loader(dataset,", "tf.subtract(x, [0.4921, 0.4833, 0.4484]) x = tf.divide(x, [0.2465, 0.2431, 0.2610])", "tf.float32), 255.) 
x = tf.subtract(x, [0.4921, 0.4833, 0.4484]) x =", "변환 with tf.variable_scope(name_or_scope='data_tensor'): self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x') self.train_y =", "= flags.FLAGS.data_path self.img_reg = re.compile('.*\\\\.jpeg', re.IGNORECASE) self.init_class() self.init_annotation() def init_class(self):", "데이터 수보다 크면 전체 데이터에 대한 random sort) ''' dataset", "self.data_path = flags.FLAGS.data_path self.img_reg = re.compile('.*\\\\.jpeg', re.IGNORECASE) self.init_class() self.init_annotation() def", "수보다 크면 전체 데이터에 대한 random sort) ''' dataset =", "random sort 기능을 수행하는 함수 (괄호안에 값이 전체 데이터 수보다", "in files: if self.img_reg.match(file): valid_x.append(os.path.join(path, file)) valid_y.append(self.cls[self.anno[file]]) self.valid_len = len(valid_y)", "tf.divide(x, [0.2465, 0.2431, 0.2610]) return x, y def test_normal(self, x):", "y def train_random_crop(self, x, y): with tf.variable_scope(name_or_scope='train_random_crop'): x = tf.read_file(filename=x)", "valid_y = [], [] for (path, dirs, files) in os.walk(os.path.join(self.data_path,", "self.test_len = len(test_x) #todo (Numpy / List) -> Tensor 로", "다시 처음부터 수행하게 하는 함수 shuffle(): 데이터셋에 대해 random sort", "valid_y.append(self.cls[self.anno[file]]) self.valid_len = len(valid_y) #todo validataion data random sort random_sort", "= label def init_train(self): train_x, train_y = [], [] for", "idx def init_annotation(self): self.anno = {} for line in open(os.path.join(self.data_path,", "file in files: if self.img_reg.match(file): valid_x.append(os.path.join(path, file)) valid_y.append(self.cls[self.anno[file]]) self.valid_len =", "x = tf.subtract(x, [0.4921, 0.4833, 0.4484]) x = tf.divide(x, [0.2465,", "dirs, files) in os.walk(os.path.join(self.data_path, 'train')): for file in files: if", "test_x.append(os.path.join(path, file)) self.test_len = len(test_x) #todo (Numpy / List) ->", "dataset_batch_loader(self, dataset, ref_func, name): with tf.variable_scope(name_or_scope=name): dataset_map = dataset.map(ref_func).batch(self.batch_size) iterator", "train_x, train_y = np.asarray(train_x, dtype=np.string_)[random_sort], np.asarray(train_y, dtype=np.int64)[random_sort] #todo (Numpy /", "Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.test_x = tf.convert_to_tensor(value=test_x, dtype=tf.string, name='test_x')", "in files: if self.img_reg.match(file): train_x.append(os.path.join(path, file)) train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg', file, re.IGNORECASE).group(1)]) self.train_len", "0.4484]) x = tf.divide(x, [0.2465, 0.2431, 0.2610]) return x, y", "'train'))): self.cls[dir] = idx def init_annotation(self): self.anno = {} for", "files) in os.walk(os.path.join(self.data_path, 'val')): for file in files: if self.img_reg.match(file):", "flags.FLAGS.data_path self.img_reg = re.compile('.*\\\\.jpeg', re.IGNORECASE) self.init_class() self.init_annotation() def init_class(self): self.cls", "# todo train/test/validation => (클래스 당 500/50/50) def __init__(self): self.image_width", "DataLoader: # todo train/test/validation => (클래스 당 500/50/50) def __init__(self):", "tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png') x = tf.pad(x, [[0,", "np.asarray(valid_y, dtype=np.int64)[random_sort] #todo (Numpy / List) -> Tensor 로 변환", "train_loader(self): with tf.variable_scope('train_loader'): ''' repeat(): 데이터셋이 끝에 도달했을 때 다시", "x = tf.divide(tf.cast(x, tf.float32), 255.) 
x = tf.subtract(x, [0.4921, 0.4833,", "dataset_map.make_one_shot_iterator() batch_input = iterator.get_next() return batch_input def train_loader(self): with tf.variable_scope('train_loader'):", "'val_annotations.txt')): filename, label, *_ = line.split('\\t') self.anno[filename] = label def", "self.dataset_batch_loader(dataset, self.train_random_crop, name='random_crop_batch') return normal_batch, random_crop_batch def valid_loader(self): with tf.variable_scope('valid_loader'):", "name='padding') # x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) x =", "= re.compile('.*\\\\.jpeg', re.IGNORECASE) self.init_class() self.init_annotation() def init_class(self): self.cls = {}", "for file in files: test_x.append(os.path.join(path, file)) self.test_len = len(test_x) #todo", "shuffle(): 데이터셋에 대해 random sort 기능을 수행하는 함수 (괄호안에 값이", "return normal_batch def test_loader(self): with tf.variable_scope('test_loader'): dataset = tf.data.Dataset.from_tensor_slices(self.test_x).repeat() normal_batch", "#todo validataion data random sort random_sort = np.random.permutation(self.valid_len) valid_x, valid_y", "500/50/50) def __init__(self): self.image_width = flags.FLAGS.image_width self.image_height = flags.FLAGS.image_height self.batch_size", "[0.2465, 0.2431, 0.2610]) return x, y def valid_normal(self, x, y):", "dataset, ref_func, name): with tf.variable_scope(name_or_scope=name): dataset_map = dataset.map(ref_func).batch(self.batch_size) iterator =", "List) -> Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.valid_x = tf.convert_to_tensor(value=valid_x,", "당 500/50/50) def __init__(self): self.image_width = flags.FLAGS.image_width self.image_height = flags.FLAGS.image_height", "Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x')", "self.anno[filename] = label def init_train(self): train_x, train_y = [], []", "files: if self.img_reg.match(file): train_x.append(os.path.join(path, file)) train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg', file, re.IGNORECASE).group(1)]) self.train_len =", "name='random_crop_batch') return normal_batch, random_crop_batch def valid_loader(self): with tf.variable_scope('valid_loader'): dataset =", "self.init_annotation() def init_class(self): self.cls = {} for idx, dir in", "if self.img_reg.match(file): valid_x.append(os.path.join(path, file)) valid_y.append(self.cls[self.anno[file]]) self.valid_len = len(valid_y) #todo validataion", "os.walk(os.path.join(self.data_path, 'val')): for file in files: if self.img_reg.match(file): valid_x.append(os.path.join(path, file))", "tf.divide(x, [0.2465, 0.2431, 0.2610]) return x, y def train_random_crop(self, x,", "random_crop_batch def valid_loader(self): with tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat() normal_batch", "함수 (괄호안에 값이 전체 데이터 수보다 크면 전체 데이터에 대한", "tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x') self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y') def init_test(self):", "= tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png') x = tf.divide(tf.cast(x,", "'train')): for file in files: if self.img_reg.match(file): train_x.append(os.path.join(path, file)) train_y.append(self.cls[re.match('(.+)\\\\_\\d+\\\\.jpeg',", "self.cls = {} for idx, dir in 
enumerate(os.listdir(os.path.join(self.data_path, 'train'))): self.cls[dir]", "= [] for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'test')): for", "0.4833, 0.4484]) x = tf.divide(x, [0.2465, 0.2431, 0.2610]) return x", "for file in files: if self.img_reg.match(file): valid_x.append(os.path.join(path, file)) valid_y.append(self.cls[self.anno[file]]) self.valid_len", "끝에 도달했을 때 다시 처음부터 수행하게 하는 함수 shuffle(): 데이터셋에", "#todo train data random sort random_sort = np.random.permutation(self.train_len) train_x, train_y", "def valid_normal(self, x, y): with tf.variable_scope(name_or_scope='valid_normal'): x = tf.read_file(filename=x) x", "train_y = [], [] for (path, dirs, files) in os.walk(os.path.join(self.data_path,", "valid_x, valid_y = [], [] for (path, dirs, files) in", "name='train_x') self.train_y = tf.convert_to_tensor(value=train_y, dtype=tf.int64, name='train_y') def init_validation(self): valid_x, valid_y", "def init_test(self): test_x = [] for (path, dirs, files) in", "flags.FLAGS.image_height self.batch_size = flags.FLAGS.batch_size self.data_path = flags.FLAGS.data_path self.img_reg = re.compile('.*\\\\.jpeg',", "train data random sort random_sort = np.random.permutation(self.train_len) train_x, train_y =", "def train_random_crop(self, x, y): with tf.variable_scope(name_or_scope='train_random_crop'): x = tf.read_file(filename=x) x", "tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png') x = tf.divide(tf.cast(x, tf.float32),", "Tensor 로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.train_x = tf.convert_to_tensor(value=train_x, dtype=tf.string, name='train_x')", "self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x') self.valid_y = tf.convert_to_tensor(value=valid_y, dtype=tf.int64, name='valid_y')", "[] for (path, dirs, files) in os.walk(os.path.join(self.data_path, 'val')): for file", "tf.data.Dataset.from_tensor_slices((self.train_x, self.train_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.train_normal, name='normal_batch') random_crop_batch = self.dataset_batch_loader(dataset,", "때 다시 처음부터 수행하게 하는 함수 shuffle(): 데이터셋에 대해 random", "tf.variable_scope('valid_loader'): dataset = tf.data.Dataset.from_tensor_slices((self.valid_x, self.valid_y)).repeat() normal_batch = self.dataset_batch_loader(dataset, self.valid_normal, name='normal_batch')", "tf.variable_scope(name_or_scope='valid_normal'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3, name='decode_png') x", "label def init_train(self): train_x, train_y = [], [] for (path,", "= tf.divide(tf.cast(x, tf.float32), 255.) x = tf.subtract(x, [0.4921, 0.4833, 0.4484])", "{} for line in open(os.path.join(self.data_path, 'val', 'val_annotations.txt')): filename, label, *_", "로 변환 with tf.variable_scope(name_or_scope='data_tensor'): self.valid_x = tf.convert_to_tensor(value=valid_x, dtype=tf.string, name='valid_x') self.valid_y", "x): with tf.variable_scope(name_or_scope='test_normal'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x, channels=3,", "dirs, files) in os.walk(os.path.join(self.data_path, 'test')): for file in files: test_x.append(os.path.join(path,", "x, y): with tf.variable_scope(name_or_scope='train_normal'): x = tf.read_file(filename=x) x = tf.image.decode_png(contents=x,", "channels=3, name='decode_png') x = tf.divide(tf.cast(x, tf.float32), 255.) 
x = tf.subtract(x,", "[0, 0]], name='padding') # x = tf.image.resize_images(images=x, size=(self.image_height+8, self.image_width+8), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)", "train_random_crop(self, x, y): with tf.variable_scope(name_or_scope='train_random_crop'): x = tf.read_file(filename=x) x =", "files: if self.img_reg.match(file): valid_x.append(os.path.join(path, file)) valid_y.append(self.cls[self.anno[file]]) self.valid_len = len(valid_y) #todo" ]
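# --- Hypothetical usage sketch (not part of the original file) ----------------
# A minimal sketch of how the loader might be driven in a TF1 session. The flag
# values and the explicit tensorflow import are assumptions; only the calls on
# `loader` itself come from the class above.
import tensorflow as tf

loader = DataLoader()
loader.init_train()
loader.init_validation()

# Each loader returns `iterator.get_next()` ops: (images, labels) tuples.
normal_batch, random_crop_batch = loader.train_loader()
valid_batch = loader.valid_loader()

with tf.Session() as sess:
    images, labels = sess.run(random_crop_batch)
    # images: (batch_size, image_height, image_width, 3); labels: (batch_size,)
    print(images.shape, labels.shape)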
[ "= True break if not ok: stderr.write(\"No markdown file found", "os.listdir(\".\"): if file.endswith(\".md\"): ok = True break if not ok:", "+ \"/src\"): os.symlink(script_path + \"/src\", tmp_dir + \"/src\") copy_tree(\".\", tmp_dir)", "args = parser.parse_args() # Check directory ok = False for", "!= \"\": if not os.path.isfile(args.base_html): stderr.write(\"The given base HTML file", "import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options() options.headless", "exist\") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp =", "= os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp = str(int(time())) tmp_dir =", "pdf = HTML(html_file_name).write_pdf() f = open(\"output.pdf\", 'wb') f.write(pdf) if not", "!= \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return global prev_compile_time if time() -", "+ \"/src\", tmp_dir + \"/src\") copy_tree(\".\", tmp_dir) # Markdown parsing", "pyinotify import subprocess from sys import stdout, stderr from time", "line flags import os import glob import re import pyinotify", "file') parser.set_defaults(watch=False) args = parser.parse_args() # Check directory ok =", "html_out_file.write(interpreted_html) # Create final PDF file pdf = HTML(html_file_name).write_pdf() f", "Temp dir timestamp = str(int(time())) tmp_dir = gettempdir() + \"/\"", "stderr from time import time, sleep from tempfile import gettempdir", "= DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d)", "not args.watch: if not args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager()", "stdout.flush() files = glob.glob(tmp_dir + '/*.md') for f in files:", "import DesiredCapabilities options = Options() options.headless = True options.log.level =", "if not args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier =", "\"/base.html\") else: copyfile(args.base_html, tmp_dir + \"/base.html\") if not os.path.islink(tmp_dir +", "= open(\"output.pdf\", 'wb') f.write(pdf) if not args.quiet: stdout.write(\"\\rDone. \") stdout.flush()", "weasyprint import HTML import argparse parser = argparse.ArgumentParser( description='Converts Markdown", "recompile(notifier): if notifier is not None and (notifier.maskname != \"IN_MODIFY\"", "= 0 def recompile(notifier): if notifier is not None and", "+ tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir + \"output.html\" # Interpret", "notifier.pathname.endswith(\".pdf\")): return global prev_compile_time if time() - prev_compile_time < 1:", "= Options() options.headless = True options.log.level = \"trace\" d =", "open(\"output.pdf\", 'wb') f.write(pdf) if not args.quiet: stdout.write(\"\\rDone. 
\") stdout.flush() recompile(None)", "default=2, help='Page generation timeout') parser.add_argument(\"--base-html\", type=str, default=\"\", help='The path to", "= gettempdir() + \"/\" + timestamp + \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True)", "os.path.isfile(args.base_html): stderr.write(\"The given base HTML file doesn't exist\") exit(1) script_path", "given base HTML file doesn't exist\") exit(1) script_path = os.path.dirname(os.path.realpath(__file__))", "\") stdout.flush() recompile(None) if not args.watch: if not args.basic: driver.quit()", "= True options.log.level = \"trace\" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] =", "import argparse parser = argparse.ArgumentParser( description='Converts Markdown to elegant PDF", "tmp_dir + \"output.html\" # Interpret JS code if not args.basic:", "selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options()", "if not os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path + \"/src\", tmp_dir +", "final PDF file pdf = HTML(html_file_name).write_pdf() f = open(\"output.pdf\", 'wb')", "type=str, default=\"\", help='The path to the base HTML file') parser.set_defaults(watch=False)", "from distutils.dir_util import copy_tree from shutil import copyfile from weasyprint", "\"\": copyfile(script_path + \"/base.html\", tmp_dir + \"/base.html\") else: copyfile(args.base_html, tmp_dir", "tmp_dir + \"/base.html\") if not os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path +", "1: return prev_compile_time = time() if not args.quiet: stdout.write(\"\\rBuilding the", "0 def recompile(notifier): if notifier is not None and (notifier.maskname", "not os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path + \"/src\", tmp_dir + \"/src\")", "in os.listdir(\".\"): if file.endswith(\".md\"): ok = True break if not", "== \"\": copyfile(script_path + \"/base.html\", tmp_dir + \"/base.html\") else: copyfile(args.base_html,", "\"\": if not os.path.isfile(args.base_html): stderr.write(\"The given base HTML file doesn't", "not None and (notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return global", "dir timestamp = str(int(time())) tmp_dir = gettempdir() + \"/\" +", "time() - prev_compile_time < 1: return prev_compile_time = time() if", "information') parser.add_argument(\"--timeout\", type=int, default=2, help='Page generation timeout') parser.add_argument(\"--base-html\", type=str, default=\"\",", "parser.add_argument(\"--timeout\", type=int, default=2, help='Page generation timeout') parser.add_argument(\"--base-html\", type=str, default=\"\", help='The", "'/*.md') for f in files: os.remove(f) if args.base_html == \"\":", "help='Do not output any information') parser.add_argument(\"--timeout\", type=int, default=2, help='Page generation", "= pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop() if not args.basic:", "script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp = str(int(time())) tmp_dir", "selenium import webdriver from selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities import", "found in the current folder\") exit(1) if args.base_html != \"\":", "type=int, default=2, help='Page generation timeout') parser.add_argument(\"--base-html\", type=str, default=\"\", help='The path", "Markdown parsing subprocess.check_output(script_path + \"/md-parsing \" 
+ tmp_dir, shell=True).decode('utf-8') html_file_name", "import time, sleep from tempfile import gettempdir from distutils.dir_util import", "from selenium import webdriver from selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities", "if not args.basic: from selenium import webdriver from selenium.webdriver.firefox.options import", "parsing subprocess.check_output(script_path + \"/md-parsing \" + tmp_dir, shell=True).decode('utf-8') html_file_name =", "if file.endswith(\".md\"): ok = True break if not ok: stderr.write(\"No", "the current folder for changes and rebuild automatically') parser.add_argument('--quiet', dest='quiet',", "elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\") as html_out_file: html_out_file.write(interpreted_html) # Create final", "elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich HTML", "str(int(time())) tmp_dir = gettempdir() + \"/\" + timestamp + \"_md-report/\"", "stdout.write(\"\\rDone. \") stdout.flush() recompile(None) if not args.watch: if not args.basic:", "Headless browser if not args.basic: from selenium import webdriver from", "parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current folder for changes and", "\"/src\") copy_tree(\".\", tmp_dir) # Markdown parsing subprocess.check_output(script_path + \"/md-parsing \"", "args.base_html != \"\": if not os.path.isfile(args.base_html): stderr.write(\"The given base HTML", "copy_tree(\".\", tmp_dir) # Markdown parsing subprocess.check_output(script_path + \"/md-parsing \" +", "Command line flags import os import glob import re import", "interpreted_html = elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\") as html_out_file: html_out_file.write(interpreted_html) #", "+ timestamp + \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) # Headless browser if", "argparse parser = argparse.ArgumentParser( description='Converts Markdown to elegant PDF reports')", "stderr.write(\"The given base HTML file doesn't exist\") exit(1) script_path =", "break if not ok: stderr.write(\"No markdown file found in the", "\"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) # Headless browser if not args.basic: from", "the current folder\") exit(1) if args.base_html != \"\": if not", "not args.quiet: stdout.write(\"\\rBuilding the PDF file...\") stdout.flush() files = glob.glob(tmp_dir", "gettempdir() + \"/\" + timestamp + \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) #", "os.symlink(script_path + \"/src\", tmp_dir + \"/src\") copy_tree(\".\", tmp_dir) # Markdown", "to elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich", "driver.get(\"file:///\" + html_file_name) sleep(2) elem = driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\")", "gettempdir from distutils.dir_util import copy_tree from shutil import copyfile from", "stdout.write(\"\\rBuilding the PDF file...\") stdout.flush() files = glob.glob(tmp_dir + '/*.md')", "import pyinotify import subprocess from sys import stdout, stderr from", "PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich HTML with", "shell=True).decode('utf-8') html_file_name = tmp_dir + \"output.html\" # Interpret JS code", "selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options() options.headless = True options.log.level", "changes and rebuild automatically') 
parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output", "HTML file') parser.set_defaults(watch=False) args = parser.parse_args() # Check directory ok", "to the base HTML file') parser.set_defaults(watch=False) args = parser.parse_args() #", "args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile)", "tempfile import gettempdir from distutils.dir_util import copy_tree from shutil import", "exist_ok=True) # Headless browser if not args.basic: from selenium import", "copyfile from weasyprint import HTML import argparse parser = argparse.ArgumentParser(", "# Command line flags import os import glob import re", "flags import os import glob import re import pyinotify import", "time import time, sleep from tempfile import gettempdir from distutils.dir_util", "JS code if not args.basic: driver.get(\"file:///\" + html_file_name) sleep(2) elem", "tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir + \"output.html\" # Interpret JS", "+ \"/base.html\", tmp_dir + \"/base.html\") else: copyfile(args.base_html, tmp_dir + \"/base.html\")", "not args.basic: from selenium import webdriver from selenium.webdriver.firefox.options import Options", "copy_tree from shutil import copyfile from weasyprint import HTML import", "Check directory ok = False for file in os.listdir(\".\"): if", "args.quiet: stdout.write(\"\\rDone. \") stdout.flush() recompile(None) if not args.watch: if not", "options = Options() options.headless = True options.log.level = \"trace\" d", "'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def", "False for file in os.listdir(\".\"): if file.endswith(\".md\"): ok = True", "PDF file pdf = HTML(html_file_name).write_pdf() f = open(\"output.pdf\", 'wb') f.write(pdf)", "file pdf = HTML(html_file_name).write_pdf() f = open(\"output.pdf\", 'wb') f.write(pdf) if", "\"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return global prev_compile_time if time() - prev_compile_time", "output any information') parser.add_argument(\"--timeout\", type=int, default=2, help='Page generation timeout') parser.add_argument(\"--base-html\",", "return prev_compile_time = time() if not args.quiet: stdout.write(\"\\rBuilding the PDF", "<filename>MarkReport/MarkReport.py #!/usr/bin/env python3 # Command line flags import os import", "if not ok: stderr.write(\"No markdown file found in the current", "ok = True break if not ok: stderr.write(\"No markdown file", "{'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0", "= driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\") as html_out_file:", "None and (notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return global prev_compile_time", "generation timeout') parser.add_argument(\"--base-html\", type=str, default=\"\", help='The path to the base", "python3 # Command line flags import os import glob import", "True break if not ok: stderr.write(\"No markdown file found in", "html_file_name = tmp_dir + \"output.html\" # Interpret JS code if", "parser = argparse.ArgumentParser( description='Converts Markdown to elegant PDF reports') parser.add_argument('--basic',", "base HTML file') parser.set_defaults(watch=False) args = parser.parse_args() # Check directory", "automatically') 
parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output any information') parser.add_argument(\"--timeout\",", "and (notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return global prev_compile_time if", "timestamp + \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) # Headless browser if not", "else: copyfile(args.base_html, tmp_dir + \"/base.html\") if not os.path.islink(tmp_dir + \"/src\"):", "if time() - prev_compile_time < 1: return prev_compile_time = time()", "# Check directory ok = False for file in os.listdir(\".\"):", "directory ok = False for file in os.listdir(\".\"): if file.endswith(\".md\"):", "file...\") stdout.flush() files = glob.glob(tmp_dir + '/*.md') for f in", "copyfile(args.base_html, tmp_dir + \"/base.html\") if not os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path", "if notifier is not None and (notifier.maskname != \"IN_MODIFY\" or", "os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp = str(int(time())) tmp_dir = gettempdir()", "(notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return global prev_compile_time if time()", "Options() options.headless = True options.log.level = \"trace\" d = DesiredCapabilities.FIREFOX", "with LaTeX and syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true',", "args.watch: if not args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier", "\"/\" + timestamp + \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) # Headless browser", "args.quiet: stdout.write(\"\\rBuilding the PDF file...\") stdout.flush() files = glob.glob(tmp_dir +", "args.basic: driver.get(\"file:///\" + html_file_name) sleep(2) elem = driver.find_element_by_xpath(\"//*\") interpreted_html =", "= argparse.ArgumentParser( description='Converts Markdown to elegant PDF reports') parser.add_argument('--basic', dest='basic',", "import re import pyinotify import subprocess from sys import stdout,", "with open(html_file_name, \"w\") as html_out_file: html_out_file.write(interpreted_html) # Create final PDF", "parser.set_defaults(watch=False) args = parser.parse_args() # Check directory ok = False", "as html_out_file: html_out_file.write(interpreted_html) # Create final PDF file pdf =", "stderr.write(\"No markdown file found in the current folder\") exit(1) if", "prev_compile_time = 0 def recompile(notifier): if notifier is not None", "pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop() if not args.basic: driver.quit()", "action='store_true', help='Watch the current folder for changes and rebuild automatically')", "= \"trace\" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver", "enrich HTML with LaTeX and syntax highlighting (faster builds)') parser.add_argument('--watch',", "file in os.listdir(\".\"): if file.endswith(\".md\"): ok = True break if", "if not args.quiet: stdout.write(\"\\rDone. 
\") stdout.flush() recompile(None) if not args.watch:", "action='store_true', help='Do not output any information') parser.add_argument(\"--timeout\", type=int, default=2, help='Page", "= HTML(html_file_name).write_pdf() f = open(\"output.pdf\", 'wb') f.write(pdf) if not args.quiet:", "the base HTML file') parser.set_defaults(watch=False) args = parser.parse_args() # Check", "= elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\") as html_out_file: html_out_file.write(interpreted_html) # Create", "help='Do not enrich HTML with LaTeX and syntax highlighting (faster", "= {'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time =", "sys import stdout, stderr from time import time, sleep from", "import webdriver from selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities", "subprocess from sys import stdout, stderr from time import time,", "\"w\") as html_out_file: html_out_file.write(interpreted_html) # Create final PDF file pdf", "import gettempdir from distutils.dir_util import copy_tree from shutil import copyfile", "parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich HTML with LaTeX and", "Markdown to elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not", "time() if not args.quiet: stdout.write(\"\\rBuilding the PDF file...\") stdout.flush() files", "files = glob.glob(tmp_dir + '/*.md') for f in files: os.remove(f)", "sleep from tempfile import gettempdir from distutils.dir_util import copy_tree from", "not os.path.isfile(args.base_html): stderr.write(\"The given base HTML file doesn't exist\") exit(1)", "return global prev_compile_time if time() - prev_compile_time < 1: return", "Create final PDF file pdf = HTML(html_file_name).write_pdf() f = open(\"output.pdf\",", "from shutil import copyfile from weasyprint import HTML import argparse", "distutils.dir_util import copy_tree from shutil import copyfile from weasyprint import", "doesn't exist\") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp", "= str(int(time())) tmp_dir = gettempdir() + \"/\" + timestamp +", "Interpret JS code if not args.basic: driver.get(\"file:///\" + html_file_name) sleep(2)", "stdout, stderr from time import time, sleep from tempfile import", "LaTeX and syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch", "recompile(None) if not args.watch: if not args.basic: driver.quit() exit(0) watch_manager", "\"/src\"): os.symlink(script_path + \"/src\", tmp_dir + \"/src\") copy_tree(\".\", tmp_dir) #", "# Interpret JS code if not args.basic: driver.get(\"file:///\" + html_file_name)", "+ \"/base.html\") if not os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path + \"/src\",", "is not None and (notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")): return", "base HTML file doesn't exist\") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) #", "pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop() if", "+ html_file_name) sleep(2) elem = driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\") with", "html_out_file: html_out_file.write(interpreted_html) # Create final 
PDF file pdf = HTML(html_file_name).write_pdf()", "d['loggingPrefs'] = {'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time", "path to the base HTML file') parser.set_defaults(watch=False) args = parser.parse_args()", "parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output any information') parser.add_argument(\"--timeout\", type=int,", "if not args.basic: driver.get(\"file:///\" + html_file_name) sleep(2) elem = driver.find_element_by_xpath(\"//*\")", "any information') parser.add_argument(\"--timeout\", type=int, default=2, help='Page generation timeout') parser.add_argument(\"--base-html\", type=str,", "f = open(\"output.pdf\", 'wb') f.write(pdf) if not args.quiet: stdout.write(\"\\rDone. \")", "PDF file...\") stdout.flush() files = glob.glob(tmp_dir + '/*.md') for f", "import subprocess from sys import stdout, stderr from time import", "time, sleep from tempfile import gettempdir from distutils.dir_util import copy_tree", "tmp_dir = gettempdir() + \"/\" + timestamp + \"_md-report/\" os.makedirs(tmp_dir,", "- prev_compile_time < 1: return prev_compile_time = time() if not", "args.basic: from selenium import webdriver from selenium.webdriver.firefox.options import Options from", "= time() if not args.quiet: stdout.write(\"\\rBuilding the PDF file...\") stdout.flush()", "event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop() if not", "HTML file doesn't exist\") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp", "action='store_true', help='Do not enrich HTML with LaTeX and syntax highlighting", "dest='watch', action='store_true', help='Watch the current folder for changes and rebuild", "ok: stderr.write(\"No markdown file found in the current folder\") exit(1)", "not output any information') parser.add_argument(\"--timeout\", type=int, default=2, help='Page generation timeout')", "os import glob import re import pyinotify import subprocess from", "code if not args.basic: driver.get(\"file:///\" + html_file_name) sleep(2) elem =", "glob.glob(tmp_dir + '/*.md') for f in files: os.remove(f) if args.base_html", "if args.base_html == \"\": copyfile(script_path + \"/base.html\", tmp_dir + \"/base.html\")", "not args.basic: driver.get(\"file:///\" + html_file_name) sleep(2) elem = driver.find_element_by_xpath(\"//*\") interpreted_html", "tmp_dir + \"/src\") copy_tree(\".\", tmp_dir) # Markdown parsing subprocess.check_output(script_path +", "\"/md-parsing \" + tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir + \"output.html\"", "+ \"output.html\" # Interpret JS code if not args.basic: driver.get(\"file:///\"", "if args.base_html != \"\": if not os.path.isfile(args.base_html): stderr.write(\"The given base", "prev_compile_time = time() if not args.quiet: stdout.write(\"\\rBuilding the PDF file...\")", "args.base_html == \"\": copyfile(script_path + \"/base.html\", tmp_dir + \"/base.html\") else:", "import HTML import argparse parser = argparse.ArgumentParser( description='Converts Markdown to", "\"output.html\" # Interpret JS code if not args.basic: driver.get(\"file:///\" +", "driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\") as html_out_file: html_out_file.write(interpreted_html)", "f.write(pdf) if not args.quiet: stdout.write(\"\\rDone. 
\") stdout.flush() recompile(None) if not", "ok = False for file in os.listdir(\".\"): if file.endswith(\".md\"): ok", "shutil import copyfile from weasyprint import HTML import argparse parser", "os.remove(f) if args.base_html == \"\": copyfile(script_path + \"/base.html\", tmp_dir +", "from tempfile import gettempdir from distutils.dir_util import copy_tree from shutil", "\"/base.html\") if not os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path + \"/src\", tmp_dir", "# Temp dir timestamp = str(int(time())) tmp_dir = gettempdir() +", "folder for changes and rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do", "description='Converts Markdown to elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do", "copyfile(script_path + \"/base.html\", tmp_dir + \"/base.html\") else: copyfile(args.base_html, tmp_dir +", "options.log.level = \"trace\" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'}", "< 1: return prev_compile_time = time() if not args.quiet: stdout.write(\"\\rBuilding", "if not args.watch: if not args.basic: driver.quit() exit(0) watch_manager =", "stdout.flush() recompile(None) if not args.watch: if not args.basic: driver.quit() exit(0)", "current folder for changes and rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true',", "not args.quiet: stdout.write(\"\\rDone. \") stdout.flush() recompile(None) if not args.watch: if", "options.headless = True options.log.level = \"trace\" d = DesiredCapabilities.FIREFOX d['loggingPrefs']", "default=\"\", help='The path to the base HTML file') parser.set_defaults(watch=False) args", "file.endswith(\".md\"): ok = True break if not ok: stderr.write(\"No markdown", "import os import glob import re import pyinotify import subprocess", "rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output any information')", "Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options() options.headless =", "+ \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) # Headless browser if not args.basic:", "argparse.ArgumentParser( description='Converts Markdown to elegant PDF reports') parser.add_argument('--basic', dest='basic', action='store_true',", "driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"),", "= False for file in os.listdir(\".\"): if file.endswith(\".md\"): ok =", "from time import time, sleep from tempfile import gettempdir from", "parser.parse_args() # Check directory ok = False for file in", "def recompile(notifier): if notifier is not None and (notifier.maskname !=", "driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier):", "timestamp = str(int(time())) tmp_dir = gettempdir() + \"/\" + timestamp", "watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True)", "not enrich HTML with LaTeX and syntax highlighting (faster builds)')", "current folder\") exit(1) if args.base_html != \"\": if not os.path.isfile(args.base_html):", "notifier is not None and (notifier.maskname != \"IN_MODIFY\" or notifier.pathname.endswith(\".pdf\")):", "not ok: stderr.write(\"No markdown file 
found in the current folder\")", "= glob.glob(tmp_dir + '/*.md') for f in files: os.remove(f) if", "HTML with LaTeX and syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch',", "webdriver from selenium.webdriver.firefox.options import Options from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options", "os.path.islink(tmp_dir + \"/src\"): os.symlink(script_path + \"/src\", tmp_dir + \"/src\") copy_tree(\".\",", "+ \"/md-parsing \" + tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir +", "help='Watch the current folder for changes and rebuild automatically') parser.add_argument('--quiet',", "exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir timestamp = str(int(time()))", "\"/src\", tmp_dir + \"/src\") copy_tree(\".\", tmp_dir) # Markdown parsing subprocess.check_output(script_path", "subprocess.check_output(script_path + \"/md-parsing \" + tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir", "d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver = webdriver.Firefox(options=options,", "DesiredCapabilities options = Options() options.headless = True options.log.level = \"trace\"", "import copyfile from weasyprint import HTML import argparse parser =", "and rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not output any", "= parser.parse_args() # Check directory ok = False for file", "= tmp_dir + \"output.html\" # Interpret JS code if not", "global prev_compile_time if time() - prev_compile_time < 1: return prev_compile_time", "for file in os.listdir(\".\"): if file.endswith(\".md\"): ok = True break", "folder\") exit(1) if args.base_html != \"\": if not os.path.isfile(args.base_html): stderr.write(\"The", "# Markdown parsing subprocess.check_output(script_path + \"/md-parsing \" + tmp_dir, shell=True).decode('utf-8')", "open(html_file_name, \"w\") as html_out_file: html_out_file.write(interpreted_html) # Create final PDF file", "help='Page generation timeout') parser.add_argument(\"--base-html\", type=str, default=\"\", help='The path to the", "in the current folder\") exit(1) if args.base_html != \"\": if", "file doesn't exist\") exit(1) script_path = os.path.dirname(os.path.realpath(__file__)) # Temp dir", "if not args.quiet: stdout.write(\"\\rBuilding the PDF file...\") stdout.flush() files =", "files: os.remove(f) if args.base_html == \"\": copyfile(script_path + \"/base.html\", tmp_dir", "True options.log.level = \"trace\" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser':", "timeout') parser.add_argument(\"--base-html\", type=str, default=\"\", help='The path to the base HTML", "reports') parser.add_argument('--basic', dest='basic', action='store_true', help='Do not enrich HTML with LaTeX", "(faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current folder for", "parser.add_argument(\"--base-html\", type=str, default=\"\", help='The path to the base HTML file')", "+ '/*.md') for f in files: os.remove(f) if args.base_html ==", "tmp_dir) # Markdown parsing subprocess.check_output(script_path + \"/md-parsing \" + tmp_dir,", "import glob import re import pyinotify import subprocess from sys", "the PDF file...\") stdout.flush() files = glob.glob(tmp_dir + '/*.md') for", "+ \"/\" + timestamp + \"_md-report/\" os.makedirs(tmp_dir, exist_ok=True) # Headless", "from selenium.webdriver.firefox.options import Options from 
selenium.webdriver.common.desired_capabilities import DesiredCapabilities options =", "syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current", "\" + tmp_dir, shell=True).decode('utf-8') html_file_name = tmp_dir + \"output.html\" #", "from selenium.webdriver.common.desired_capabilities import DesiredCapabilities options = Options() options.headless = True", "= pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS, rec=True) event_notifier.loop()", "prev_compile_time < 1: return prev_compile_time = time() if not args.quiet:", "dest='basic', action='store_true', help='Do not enrich HTML with LaTeX and syntax", "if not os.path.isfile(args.base_html): stderr.write(\"The given base HTML file doesn't exist\")", "markdown file found in the current folder\") exit(1) if args.base_html", "for changes and rebuild automatically') parser.add_argument('--quiet', dest='quiet', action='store_true', help='Do not", "highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current folder", "# Create final PDF file pdf = HTML(html_file_name).write_pdf() f =", "from sys import stdout, stderr from time import time, sleep", "webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier): if notifier", "from weasyprint import HTML import argparse parser = argparse.ArgumentParser( description='Converts", "in files: os.remove(f) if args.base_html == \"\": copyfile(script_path + \"/base.html\",", "HTML(html_file_name).write_pdf() f = open(\"output.pdf\", 'wb') f.write(pdf) if not args.quiet: stdout.write(\"\\rDone.", "\"trace\" d = DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver =", "re import pyinotify import subprocess from sys import stdout, stderr", "= webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier): if", "DesiredCapabilities.FIREFOX d['loggingPrefs'] = {'browser': 'ALL'} driver = webdriver.Firefox(options=options, capabilities=d) driver.set_page_load_timeout(args.timeout)", "'wb') f.write(pdf) if not args.quiet: stdout.write(\"\\rDone. 
\") stdout.flush() recompile(None) if", "capabilities=d) driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier): if notifier is", "os.makedirs(tmp_dir, exist_ok=True) # Headless browser if not args.basic: from selenium", "or notifier.pathname.endswith(\".pdf\")): return global prev_compile_time if time() - prev_compile_time <", "sleep(2) elem = driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\")", "import stdout, stderr from time import time, sleep from tempfile", "# Headless browser if not args.basic: from selenium import webdriver", "browser if not args.basic: from selenium import webdriver from selenium.webdriver.firefox.options", "+ \"/base.html\") else: copyfile(args.base_html, tmp_dir + \"/base.html\") if not os.path.islink(tmp_dir", "elem = driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\") with open(html_file_name, \"w\") as", "not args.basic: driver.quit() exit(0) watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager,", "exit(1) if args.base_html != \"\": if not os.path.isfile(args.base_html): stderr.write(\"The given", "prev_compile_time if time() - prev_compile_time < 1: return prev_compile_time =", "+ \"/src\") copy_tree(\".\", tmp_dir) # Markdown parsing subprocess.check_output(script_path + \"/md-parsing", "for f in files: os.remove(f) if args.base_html == \"\": copyfile(script_path", "#!/usr/bin/env python3 # Command line flags import os import glob", "file found in the current folder\") exit(1) if args.base_html !=", "HTML import argparse parser = argparse.ArgumentParser( description='Converts Markdown to elegant", "driver.set_page_load_timeout(args.timeout) prev_compile_time = 0 def recompile(notifier): if notifier is not", "f in files: os.remove(f) if args.base_html == \"\": copyfile(script_path +", "import copy_tree from shutil import copyfile from weasyprint import HTML", "\"/base.html\", tmp_dir + \"/base.html\") else: copyfile(args.base_html, tmp_dir + \"/base.html\") if", "dest='quiet', action='store_true', help='Do not output any information') parser.add_argument(\"--timeout\", type=int, default=2,", "html_file_name) sleep(2) elem = driver.find_element_by_xpath(\"//*\") interpreted_html = elem.get_attribute(\"outerHTML\") with open(html_file_name,", "tmp_dir + \"/base.html\") else: copyfile(args.base_html, tmp_dir + \"/base.html\") if not", "and syntax highlighting (faster builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the", "builds)') parser.add_argument('--watch', dest='watch', action='store_true', help='Watch the current folder for changes", "exit(0) watch_manager = pyinotify.WatchManager() event_notifier = pyinotify.Notifier(watch_manager, recompile) watch_manager.add_watch(os.path.abspath(\".\"), pyinotify.ALL_EVENTS,", "glob import re import pyinotify import subprocess from sys import", "help='The path to the base HTML file') parser.set_defaults(watch=False) args =" ]
[ "visited, is_possible) visited[cur] = False if __name__ == '__main__': input", "dfs(node, 0, visited, is_possible) if is_possible[0]: return 1 return 0", "if visited[cur]: return if depth == target_depth: is_possible[0] = True", "[False] for node in range(N): visited = [False for _", "target_depth = 4 N, M = map(int, input().split()) graph =", "def dfs_all(): is_possible = [False] for node in range(N): visited", "def dfs(cur, depth, visited, is_possible): if visited[cur]: return if depth", "+ 1, visited, is_possible) visited[cur] = False if __name__ ==", "1, visited, is_possible) visited[cur] = False if __name__ == '__main__':", "input().split()) graph = [list() for _ in range(N)] for _", "= True for nxt in graph[cur]: dfs(nxt, depth + 1,", "'__main__': input = __import__('sys').stdin.readline target_depth = 4 N, M =", "M = map(int, input().split()) graph = [list() for _ in", "4 N, M = map(int, input().split()) graph = [list() for", "map(int, input().split()) graph = [list() for _ in range(N)] for", "return 0 def dfs(cur, depth, visited, is_possible): if visited[cur]: return", "graph = [list() for _ in range(N)] for _ in", "dfs_all(): is_possible = [False] for node in range(N): visited =", "_ in range(M): a, b = map(int, input().split()) graph[a].append(b) graph[b].append(a)", "[False for _ in range(N)] dfs(node, 0, visited, is_possible) if", "return if depth == target_depth: is_possible[0] = True return visited[cur]", "https://www.acmicpc.net/problem/13023 import sys sys.setrecursionlimit(999999999) def dfs_all(): is_possible = [False] for", "range(N): visited = [False for _ in range(N)] dfs(node, 0,", "return 1 return 0 def dfs(cur, depth, visited, is_possible): if", "True return visited[cur] = True for nxt in graph[cur]: dfs(nxt,", "0, visited, is_possible) if is_possible[0]: return 1 return 0 def", "__name__ == '__main__': input = __import__('sys').stdin.readline target_depth = 4 N,", "# https://www.acmicpc.net/problem/13023 import sys sys.setrecursionlimit(999999999) def dfs_all(): is_possible = [False]", "is_possible) visited[cur] = False if __name__ == '__main__': input =", "1 return 0 def dfs(cur, depth, visited, is_possible): if visited[cur]:", "visited[cur]: return if depth == target_depth: is_possible[0] = True return", "dfs(nxt, depth + 1, visited, is_possible) visited[cur] = False if", "in range(N): visited = [False for _ in range(N)] dfs(node,", "in graph[cur]: dfs(nxt, depth + 1, visited, is_possible) visited[cur] =", "is_possible): if visited[cur]: return if depth == target_depth: is_possible[0] =", "visited, is_possible) if is_possible[0]: return 1 return 0 def dfs(cur,", "depth == target_depth: is_possible[0] = True return visited[cur] = True", "__import__('sys').stdin.readline target_depth = 4 N, M = map(int, input().split()) graph", "visited[cur] = False if __name__ == '__main__': input = __import__('sys').stdin.readline", "visited, is_possible): if visited[cur]: return if depth == target_depth: is_possible[0]", "node in range(N): visited = [False for _ in range(N)]", "visited[cur] = True for nxt in graph[cur]: dfs(nxt, depth +", "if __name__ == '__main__': input = __import__('sys').stdin.readline target_depth = 4", "is_possible) if is_possible[0]: return 1 return 0 def dfs(cur, depth,", "is_possible = [False] for node in range(N): visited = [False", "= [False for _ in range(N)] dfs(node, 0, visited, is_possible)", "is_possible[0]: return 1 return 0 def dfs(cur, depth, visited, is_possible):", "False if __name__ == '__main__': input = __import__('sys').stdin.readline 
target_depth =", "= [False] for node in range(N): visited = [False for", "True for nxt in graph[cur]: dfs(nxt, depth + 1, visited,", "for nxt in graph[cur]: dfs(nxt, depth + 1, visited, is_possible)", "range(N)] dfs(node, 0, visited, is_possible) if is_possible[0]: return 1 return", "== '__main__': input = __import__('sys').stdin.readline target_depth = 4 N, M", "= True return visited[cur] = True for nxt in graph[cur]:", "_ in range(N)] for _ in range(M): a, b =", "import sys sys.setrecursionlimit(999999999) def dfs_all(): is_possible = [False] for node", "input = __import__('sys').stdin.readline target_depth = 4 N, M = map(int,", "[list() for _ in range(N)] for _ in range(M): a,", "if is_possible[0]: return 1 return 0 def dfs(cur, depth, visited,", "target_depth: is_possible[0] = True return visited[cur] = True for nxt", "== target_depth: is_possible[0] = True return visited[cur] = True for", "= False if __name__ == '__main__': input = __import__('sys').stdin.readline target_depth", "in range(M): a, b = map(int, input().split()) graph[a].append(b) graph[b].append(a) print(dfs_all())", "sys.setrecursionlimit(999999999) def dfs_all(): is_possible = [False] for node in range(N):", "N, M = map(int, input().split()) graph = [list() for _", "nxt in graph[cur]: dfs(nxt, depth + 1, visited, is_possible) visited[cur]", "if depth == target_depth: is_possible[0] = True return visited[cur] =", "graph[cur]: dfs(nxt, depth + 1, visited, is_possible) visited[cur] = False", "in range(N)] dfs(node, 0, visited, is_possible) if is_possible[0]: return 1", "visited = [False for _ in range(N)] dfs(node, 0, visited,", "range(N)] for _ in range(M): a, b = map(int, input().split())", "= 4 N, M = map(int, input().split()) graph = [list()", "0 def dfs(cur, depth, visited, is_possible): if visited[cur]: return if", "for _ in range(N)] for _ in range(M): a, b", "for node in range(N): visited = [False for _ in", "= __import__('sys').stdin.readline target_depth = 4 N, M = map(int, input().split())", "_ in range(N)] dfs(node, 0, visited, is_possible) if is_possible[0]: return", "dfs(cur, depth, visited, is_possible): if visited[cur]: return if depth ==", "is_possible[0] = True return visited[cur] = True for nxt in", "for _ in range(M): a, b = map(int, input().split()) graph[a].append(b)", "depth, visited, is_possible): if visited[cur]: return if depth == target_depth:", "= [list() for _ in range(N)] for _ in range(M):", "in range(N)] for _ in range(M): a, b = map(int,", "sys sys.setrecursionlimit(999999999) def dfs_all(): is_possible = [False] for node in", "for _ in range(N)] dfs(node, 0, visited, is_possible) if is_possible[0]:", "return visited[cur] = True for nxt in graph[cur]: dfs(nxt, depth", "depth + 1, visited, is_possible) visited[cur] = False if __name__", "= map(int, input().split()) graph = [list() for _ in range(N)]" ]
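# Illustrative run (hedged: the input below is a paraphrase of the
# problem's first sample, not taken from this repo, and solution.py is a
# hypothetical filename for the script above). With 5 people and the
# friendship chain 0-1-2-3-4, a path of length target_depth=4 exists:
#
#   $ printf '5 4\n0 1\n1 2\n2 3\n3 4\n' | python3 solution.py
#   1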
[ "to share' 'private data on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3',", "3.5', 'Operating System :: MacOS :: MacOS X', 'Operating System", "Developers', 'Topic :: Database', 'Topic :: Database :: Database Engines/Servers',", "Linux', ], packages=find_packages(), entry_points={ 'console_scripts': [ 'bst=bst.cli:main' ], }, install_requires=install_requires", "Development', 'Natural Language :: English', 'License :: OSI Approved ::", "'Programming Language :: Python :: 3 :: Only', 'Programming Language", ":: Database', 'Topic :: Database :: Database Engines/Servers', 'Topic ::", "MacOS X', 'Operating System :: POSIX :: Linux', ], packages=find_packages(),", "X', 'Operating System :: POSIX :: Linux', ], packages=find_packages(), entry_points={", "'Operating System :: MacOS :: MacOS X', 'Operating System ::", "English', 'License :: OSI Approved :: GNU Affero General Public", "Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Operating", "Sharing Tools\"\"\" from setuptools import setup, find_packages install_requires = [", "Language :: English', 'License :: OSI Approved :: GNU Affero", "find_packages install_requires = [ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama', ]", "Language :: Python :: 3 :: Only', 'Programming Language ::", "'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama', ] setup( name='bst', version='0.1.0', description='bst:", ":: Linux', ], packages=find_packages(), entry_points={ 'console_scripts': [ 'bst=bst.cli:main' ], },", ":: Python :: 3.4', 'Programming Language :: Python :: 3.5',", "3.4', 'Programming Language :: Python :: 3.5', 'Operating System ::", "<reponame>bigchaindb/privacy-protocols \"\"\"bst: BigchainDB Sharing Tools\"\"\" from setuptools import setup, find_packages", "[ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama', ] setup( name='bst', version='0.1.0',", "different patterns to share' 'private data on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>',", "Database', 'Topic :: Database :: Database Engines/Servers', 'Topic :: Software", "], packages=find_packages(), entry_points={ 'console_scripts': [ 'bst=bst.cli:main' ], }, install_requires=install_requires )", "author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[ 'Development Status :: 3 -", "setuptools import setup, find_packages install_requires = [ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver',", "License v3', 'Programming Language :: Python :: 3 :: Only',", "install_requires = [ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama', ] setup(", ":: MacOS X', 'Operating System :: POSIX :: Linux', ],", "\"\"\"bst: BigchainDB Sharing Tools\"\"\" from setuptools import setup, find_packages install_requires", "3 :: Only', 'Programming Language :: Python :: 3.4', 'Programming", "System :: MacOS :: MacOS X', 'Operating System :: POSIX", "Alpha', 'Intended Audience :: Developers', 'Topic :: Database', 'Topic ::", "'Programming Language :: Python :: 3.5', 'Operating System :: MacOS", "Engines/Servers', 'Topic :: Software Development', 'Natural Language :: English', 'License", "data on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[ 'Development", "of scripts with different patterns to share' 'private data on", ":: 3.5', 'Operating System :: MacOS :: MacOS X', 
'Operating", "Sharing Tools', long_description=( 'A collection of scripts with different patterns", "author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[ 'Development Status :: 3 - Alpha',", "import setup, find_packages install_requires = [ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7',", "'Operating System :: POSIX :: Linux', ], packages=find_packages(), entry_points={ 'console_scripts':", "'A collection of scripts with different patterns to share' 'private", "Python :: 3.5', 'Operating System :: MacOS :: MacOS X',", "'bigchaindb-driver', 'click==6.7', 'colorama', ] setup( name='bst', version='0.1.0', description='bst: BigchainDB Sharing", "'Topic :: Database', 'Topic :: Database :: Database Engines/Servers', 'Topic", "'Topic :: Database :: Database Engines/Servers', 'Topic :: Software Development',", "share' 'private data on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False,", "'colorama', ] setup( name='bst', version='0.1.0', description='bst: BigchainDB Sharing Tools', long_description=(", ":: MacOS :: MacOS X', 'Operating System :: POSIX ::", "OSI Approved :: GNU Affero General Public License v3', 'Programming", "Database Engines/Servers', 'Topic :: Software Development', 'Natural Language :: English',", "version='0.1.0', description='bst: BigchainDB Sharing Tools', long_description=( 'A collection of scripts", ":: Database :: Database Engines/Servers', 'Topic :: Software Development', 'Natural", ":: English', 'License :: OSI Approved :: GNU Affero General", "BigchainDB Sharing Tools\"\"\" from setuptools import setup, find_packages install_requires =", "Tools\"\"\" from setuptools import setup, find_packages install_requires = [ 'base58~=0.2.2',", "BigchainDB Sharing Tools', long_description=( 'A collection of scripts with different", ":: Software Development', 'Natural Language :: English', 'License :: OSI", "from setuptools import setup, find_packages install_requires = [ 'base58~=0.2.2', 'PyNaCl~=1.1.0',", "Language :: Python :: 3.5', 'Operating System :: MacOS ::", ":: Python :: 3 :: Only', 'Programming Language :: Python", ":: 3.4', 'Programming Language :: Python :: 3.5', 'Operating System", "on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[ 'Development Status", "v3', 'Programming Language :: Python :: 3 :: Only', 'Programming", ":: Database Engines/Servers', 'Topic :: Software Development', 'Natural Language ::", "'Programming Language :: Python :: 3.4', 'Programming Language :: Python", "MacOS :: MacOS X', 'Operating System :: POSIX :: Linux',", "'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama', ] setup( name='bst', version='0.1.0', description='bst: BigchainDB", "Tools', long_description=( 'A collection of scripts with different patterns to", "Approved :: GNU Affero General Public License v3', 'Programming Language", "'Intended Audience :: Developers', 'Topic :: Database', 'Topic :: Database", "Public License v3', 'Programming Language :: Python :: 3 ::", "- Alpha', 'Intended Audience :: Developers', 'Topic :: Database', 'Topic", ":: Only', 'Programming Language :: Python :: 3.4', 'Programming Language", "'License :: OSI Approved :: GNU Affero General Public License", "Only', 'Programming Language :: Python :: 3.4', 'Programming Language ::", "license='AGPLv3', zip_safe=False, classifiers=[ 'Development Status :: 3 - Alpha', 'Intended", 
"url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[ 'Development Status :: 3", "] setup( name='bst', version='0.1.0', description='bst: BigchainDB Sharing Tools', long_description=( 'A", "'click==6.7', 'colorama', ] setup( name='bst', version='0.1.0', description='bst: BigchainDB Sharing Tools',", ":: POSIX :: Linux', ], packages=find_packages(), entry_points={ 'console_scripts': [ 'bst=bst.cli:main'", "zip_safe=False, classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience", "'Topic :: Software Development', 'Natural Language :: English', 'License ::", "scripts with different patterns to share' 'private data on BigchainDB.'),", "= [ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama', ] setup( name='bst',", ":: OSI Approved :: GNU Affero General Public License v3',", "collection of scripts with different patterns to share' 'private data", "Python :: 3 :: Only', 'Programming Language :: Python ::", "'Natural Language :: English', 'License :: OSI Approved :: GNU", "POSIX :: Linux', ], packages=find_packages(), entry_points={ 'console_scripts': [ 'bst=bst.cli:main' ],", "3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Database',", "Software Development', 'Natural Language :: English', 'License :: OSI Approved", ":: GNU Affero General Public License v3', 'Programming Language ::", "long_description=( 'A collection of scripts with different patterns to share'", "Audience :: Developers', 'Topic :: Database', 'Topic :: Database ::", ":: Developers', 'Topic :: Database', 'Topic :: Database :: Database", "Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic", "Language :: Python :: 3.4', 'Programming Language :: Python ::", "GNU Affero General Public License v3', 'Programming Language :: Python", "description='bst: BigchainDB Sharing Tools', long_description=( 'A collection of scripts with", "Database :: Database Engines/Servers', 'Topic :: Software Development', 'Natural Language", ":: 3 - Alpha', 'Intended Audience :: Developers', 'Topic ::", ":: 3 :: Only', 'Programming Language :: Python :: 3.4',", "General Public License v3', 'Programming Language :: Python :: 3", "BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[ 'Development Status ::", "'private data on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>', license='AGPLv3', zip_safe=False, classifiers=[", "'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',", "System :: POSIX :: Linux', ], packages=find_packages(), entry_points={ 'console_scripts': [", "name='bst', version='0.1.0', description='bst: BigchainDB Sharing Tools', long_description=( 'A collection of", "with different patterns to share' 'private data on BigchainDB.'), url='https://github.com/vrde/bst/',", "setup( name='bst', version='0.1.0', description='bst: BigchainDB Sharing Tools', long_description=( 'A collection", ":: Python :: 3.5', 'Operating System :: MacOS :: MacOS", "Affero General Public License v3', 'Programming Language :: Python ::", "classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience ::", "setup, find_packages install_requires = [ 'base58~=0.2.2', 'PyNaCl~=1.1.0', 'bigchaindb-driver', 'click==6.7', 'colorama',", "patterns to share' 'private data on BigchainDB.'), url='https://github.com/vrde/bst/', author='<NAME>', author_email='<EMAIL>'," ]
[ "<gh_stars>0 from django.contrib import admin from db.models.job_resources import JobResources admin.site.register(JobResources)" ]
[ "list_output_dict = [] # output directory path outdir = \"../results/run1/\"", "weight_column_idx] new_X_train = np.delete(X_train, weight_column_idx, axis=1) new_questions = column_names new_questions.remove('weight')", "= { 'criterion': ['gini', 'entropy'], 'max_depth': [2, 3, 4, 5,", "o_models_file.write(str(num)+\",\") o_models_file.write(str(thres)+\",\") for ii in range(len(model_obj['best_features'])): o_models_file.write(model_obj['best_features'][ii]+\" \") o_models_file.write(\",\") o_models_file.write(model_obj['best_params']['criterion']+\",\")", "train, test (refer to optimal_params.py) poll_data = data.PollDataProxy(remove_nan=False, convert_to_float=False) acc", "run_num in range(repeat): all_data, all_data_questions = poll_data.all_data_except(get_bad_questions()) X = all_data[:,", "array. \"\"\" weight_column_idx = column_names.index('weight') weights = X_train[:, weight_column_idx] new_X_train", "in list_Kfold: '''Here create a class onject of \"model_sel\" and", "decide this list_ftsel_method = ['chi2','mutlinfo','pca','dt'] list_num_features = [10,15,20] # decide", "def get_bad_questions(): f = open(\"../extern/manage_data/list_unnecessary_columns.txt\", 'r') bad_questions = f.readline().split(',') bad_questions[-1]", "f = open(\"../extern/manage_data/list_unnecessary_columns.txt\", 'r') bad_questions = f.readline().split(',') bad_questions[-1] = bad_questions[-1][:-1]", "in list_num_features: data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions) ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions)", "weight for training return bad_questions def separate_weights(X_train, column_names): \"\"\" Removes", "ftsel_obj.ftsel_pca(data_dict) fts = data_sel_dict['X_train'].shape[1] questions_int = list(map(str, list(range(1,fts+1,1)))) ranked_questions =", "scripts are transferred here. (get_bad_questions() and separate_weights().)''' for ts in", "weights into train, test (refer to optimal_params.py) poll_data = data.PollDataProxy(remove_nan=False,", "create a .csv file to list all the models and", "# decide this list_ftsel_method = ['chi2','mutlinfo','pca','dt'] list_num_features = [10,15,20] #", "Functions from this python scripts are transferred here. 
(get_bad_questions() and", "'X_test': X_test, 'y_train': y_train, 'y_test': y_test } weights_dict = {", "for K in list_Kfold: '''Here create a class onject of", "12, 15], } repeat = 1 #output dictrionary list list_output_dict", "= feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_mutlinfo\" ) data_ranked_dict, ranked_questions =", "outdir).select_model() # intermediate = model_obj.select_model() acc.append(model_obj['test_acc']) o_models_file.write(str(ts)+\",\") o_models_file.write(str(run_num)+\",\") o_models_file.write(meth+\",\") o_models_file.write(str(K)+\",\")", "X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=ts, shuffle=True) X_train,", "(get_bad_questions() and separate_weights().)''' for ts in list_test_size: for run_num in", "questions_int = list(map(str, list(range(1,fts+1,1)))) ranked_questions = [\"ft_\"+x for x in", "elif(meth=='dt'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_dt\" ) data_ranked_dict,", "param_space, K, num, thres, data_sel_dict ,weights_dict, sel_questions, outdir).select_model() # intermediate", "shuffle=True) X_train, weights_train, questions = separate_weights(X_train, all_data_questions[:-1]) X_test, weights_test, _", "{ 'weights_train': weights_train, 'weights_test': weights_test} for meth in list_ftsel_method: '''Create", "to optimal_params.py. Functions from this python scripts are transferred here.", "training return bad_questions def separate_weights(X_train, column_names): \"\"\" Removes the column", "unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_pca\" ) data_ranked_dict,_ = ftsel_obj.ftsel_pca(data_dict) fts = data_sel_dict['X_train'].shape[1]", "K in list_Kfold: '''Here create a class onject of \"model_sel\"", "o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_split'])+\",\") o_models_file.write(str(model_obj['train_acc'])+\",\") o_models_file.write(str(model_obj['test_acc'])+\",\") o_models_file.write(\"\\n\") list_output_dict.append(model_obj) '''Once all the models", "data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions) ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions) for K", "and returns it as a separate array. 
\"\"\" weight_column_idx =", "features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test accuracy\\n\") #splitting data and weights into", "best test accuracy and return the output dict for that", "[2, 5, 10], 'min_samples_leaf': [2, 5, 10], 'max_leaf_nodes': [2, 4,", "np.argmax(acc) best_model_dict = list_output_dict[best_index] print(\"The best model parameters:\") print(best_model_dict) def", "import subprocess from sklearn import model_selection, tree import data import", "bad_questions[-1] = bad_questions[-1][:-1] # chop the \\n off the end", "#splitting data and weights into train, test (refer to optimal_params.py)", "= list_output_dict[best_index] print(\"The best model parameters:\") print(best_model_dict) def get_bad_questions(): f", "ftsel_obj.ftsel_mutlinfo(data_dict, thres) elif(meth=='pca'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_pca\"", "of Testing Samples:\", len(X_test)) data_dict = { 'X_train': X_train, 'X_test':", "and weights into train, test (refer to optimal_params.py) poll_data =", "import os import matplotlib.pyplot as plt import seaborn as sns", "= all_data[:, :-1] y = all_data[:, -1] X_train, X_test, y_train,", "accuracy and return the output dict for that model.''' o_models_file.close()", "test accuracy and return the output dict for that model.'''", "import data import feature_selection import model_sel import os import matplotlib.pyplot", "= separate_weights(X_test, all_data_questions[:-1]) print(\"Number of Training Samples:\", len(X_train)) print(\"Number of", "unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_chi2\" ) data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres) elif(meth=='mutlinfo'):", "output all the best parameters and values into \"list_output_dict\". Then,", "import feature_selection import model_sel import os import matplotlib.pyplot as plt", "#parameter space list_test_size = [0.1,0.15,0.2] # decide this list_ftsel_method =", "K, num, thres, data_sel_dict ,weights_dict, sel_questions, outdir).select_model() # intermediate =", "parameters:\") print(best_model_dict) def get_bad_questions(): f = open(\"../extern/manage_data/list_unnecessary_columns.txt\", 'r') bad_questions =", "elif(meth=='pca'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_pca\" ) data_ranked_dict,_", "3, 4, 5, 7], 'min_samples_split': [2, 5, 10], 'min_samples_leaf': [2,", "= {}, [] ftsel_obj =None if(meth=='chi2'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\",", "o_models_file.write(str(model_obj['best_params']['max_depth'])+\",\") o_models_file.write(str(model_obj['best_params']['max_leaf_nodes'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_split'])+\",\") o_models_file.write(str(model_obj['train_acc'])+\",\") o_models_file.write(str(model_obj['test_acc'])+\",\") o_models_file.write(\"\\n\") list_output_dict.append(model_obj) '''Once all", "this python scripts are transferred here. 
(get_bad_questions() and separate_weights().)''' for", "are transferred here. (get_bad_questions() and separate_weights().)''' for ts in list_test_size:", "this list_ftsel_method = ['chi2','mutlinfo','pca','dt'] list_num_features = [10,15,20] # decide this", "of \"model_sel\" and output all the best parameters and values", "dict for that model.''' o_models_file.close() best_index = np.argmax(acc) best_model_dict =", "1 #output dictrionary list list_output_dict = [] # output directory", "separate_weights(X_train, column_names): \"\"\" Removes the column containing weights from X_train,", "run, select the model with best test accuracy and return", "this param_space = { 'criterion': ['gini', 'entropy'], 'max_depth': [2, 3,", "# output directory path outdir = \"../results/run1/\" if(not os.path.isdir(outdir)): os.mkdir(outdir)", "for training return bad_questions def separate_weights(X_train, column_names): \"\"\" Removes the", "bad_questions.remove('weight') # need weight for training return bad_questions def separate_weights(X_train,", "= data_sel_dict['X_train'].shape[1] questions_int = list(map(str, list(range(1,fts+1,1)))) ranked_questions = [\"ft_\"+x for", "for x in questions_int] elif(meth=='dt'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\",", "= [\"ft_\"+x for x in questions_int] elif(meth=='dt'): ftsel_obj = feature_selection.FeatureSelection(", "\"\"\" Removes the column containing weights from X_train, and returns", "create a class onject of \"model_sel\" and output all the", "as sns def main(): #parameter space list_test_size = [0.1,0.15,0.2] #", "method''' for thres in list_corr_threshold: data_ranked_dict, ranked_questions = {}, []", "matplotlib.pyplot as plt import seaborn as sns def main(): #parameter", "4, 5, 7], 'min_samples_split': [2, 5, 10], 'min_samples_leaf': [2, 5,", "X_test, weights_test, _ = separate_weights(X_test, all_data_questions[:-1]) print(\"Number of Training Samples:\",", "necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_dt\" ) data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres)", "f.readline().split(',') bad_questions[-1] = bad_questions[-1][:-1] # chop the \\n off the", "in range(len(model_obj['best_features'])): o_models_file.write(model_obj['best_features'][ii]+\" \") o_models_file.write(\",\") o_models_file.write(model_obj['best_params']['criterion']+\",\") o_models_file.write(str(model_obj['best_params']['max_depth'])+\",\") o_models_file.write(str(model_obj['best_params']['max_leaf_nodes'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_split'])+\",\")", "list_corr_threshold = [1,0.5,0.6,0.7] # decide this param_space = { 'criterion':", "'entropy'], 'max_depth': [2, 3, 4, 5, 7], 'min_samples_split': [2, 5,", "as plt import seaborn as sns def main(): #parameter space", "ftsel_obj.ftsel_decision_tree_method(data_dict, thres) for num in list_num_features: data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict,", "o_models_file.close() best_index = np.argmax(acc) best_model_dict = list_output_dict[best_index] print(\"The best model", "7], 'min_samples_split': [2, 5, 10], 'min_samples_leaf': [2, 5, 10], 'max_leaf_nodes':", 
"list(map(str, list(range(1,fts+1,1)))) ranked_questions = [\"ft_\"+x for x in questions_int] elif(meth=='dt'):", "'max_depth': [2, 3, 4, 5, 7], 'min_samples_split': [2, 5, 10],", "poll_data.all_data_except(get_bad_questions()) X = all_data[:, :-1] y = all_data[:, -1] X_train,", "output directory path outdir = \"../results/run1/\" if(not os.path.isdir(outdir)): os.mkdir(outdir) o_models_file", "it as a separate array. \"\"\" weight_column_idx = column_names.index('weight') weights", "open(outdir+\"models.csv\",\"w\") o_models_file.write(\"test size,run num,ftsel method,Kfold,number of features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test", "= feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_chi2\" ) data_ranked_dict, ranked_questions =", "get_bad_questions(): f = open(\"../extern/manage_data/list_unnecessary_columns.txt\", 'r') bad_questions = f.readline().split(',') bad_questions[-1] =", "print(\"Number of Training Samples:\", len(X_train)) print(\"Number of Testing Samples:\", len(X_test))", "if(not os.path.isdir(outdir)): os.mkdir(outdir) o_models_file = open(outdir+\"models.csv\",\"w\") o_models_file.write(\"test size,run num,ftsel method,Kfold,number", "file to list all the models and accuracies.''' model_obj =", "the models are run, select the model with best test", "6, 8, 10, 12, 15], } repeat = 1 #output", "can create a .csv file to list all the models", "Testing Samples:\", len(X_test)) data_dict = { 'X_train': X_train, 'X_test': X_test,", "weights_train, questions = separate_weights(X_train, all_data_questions[:-1]) X_test, weights_test, _ = separate_weights(X_test,", "= ftsel_obj.ftsel_decision_tree_method(data_dict, thres) for num in list_num_features: data_sel_dict, sel_questions =", "all_data[:, :-1] y = all_data[:, -1] X_train, X_test, y_train, y_test", "data_ranked_dict, ranked_questions = {}, [] ftsel_obj =None if(meth=='chi2'): ftsel_obj =", ") data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres) for num in list_num_features:", "} weights_dict = { 'weights_train': weights_train, 'weights_test': weights_test} for meth", "data and weights into train, test (refer to optimal_params.py) poll_data", "models and accuracies.''' model_obj = model_sel.model_sel(ts, run_num, meth, param_space, K,", "= model_obj.select_model() acc.append(model_obj['test_acc']) o_models_file.write(str(ts)+\",\") o_models_file.write(str(run_num)+\",\") o_models_file.write(meth+\",\") o_models_file.write(str(K)+\",\") o_models_file.write(str(num)+\",\") o_models_file.write(str(thres)+\",\") for", "# decide this param_space = { 'criterion': ['gini', 'entropy'], 'max_depth':", "numpy as np import sklearn import subprocess from sklearn import", "feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_chi2\" ) data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict,", "feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_pca\" ) data_ranked_dict,_ = ftsel_obj.ftsel_pca(data_dict) fts", 
"list_Kfold: '''Here create a class onject of \"model_sel\" and output", "directory path outdir = \"../results/run1/\" if(not os.path.isdir(outdir)): os.mkdir(outdir) o_models_file =", "o_models_file.write(\"test size,run num,ftsel method,Kfold,number of features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test accuracy\\n\")", "separate array. \"\"\" weight_column_idx = column_names.index('weight') weights = X_train[:, weight_column_idx]", "bool_necess_que=False, run_name=\"test_pca\" ) data_ranked_dict,_ = ftsel_obj.ftsel_pca(data_dict) fts = data_sel_dict['X_train'].shape[1] questions_int", "in range(repeat): all_data, all_data_questions = poll_data.all_data_except(get_bad_questions()) X = all_data[:, :-1]", "= ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions) ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions) for K in list_Kfold:", "ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions) for K in list_Kfold: '''Here create a class", "questions = separate_weights(X_train, all_data_questions[:-1]) X_test, weights_test, _ = separate_weights(X_test, all_data_questions[:-1])", "[2, 3, 4, 5, 7], 'min_samples_split': [2, 5, 10], 'min_samples_leaf':", "in list_corr_threshold: data_ranked_dict, ranked_questions = {}, [] ftsel_obj =None if(meth=='chi2'):", "list_num_features = [10,15,20] # decide this list_Kfold = [3,5] list_corr_threshold", "returns it as a separate array. \"\"\" weight_column_idx = column_names.index('weight')", "necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_chi2\" ) data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres)", "os import matplotlib.pyplot as plt import seaborn as sns def", "thres) for num in list_num_features: data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num,", "list_num_features: data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions) ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions) for", "np import sklearn import subprocess from sklearn import model_selection, tree", "o_models_file.write(str(model_obj['best_params']['max_leaf_nodes'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_leaf'])+\",\") o_models_file.write(str(model_obj['best_params']['min_samples_split'])+\",\") o_models_file.write(str(model_obj['train_acc'])+\",\") o_models_file.write(str(model_obj['test_acc'])+\",\") o_models_file.write(\"\\n\") list_output_dict.append(model_obj) '''Once all the", "data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres) elif(meth=='mutlinfo'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\",", "for num in list_num_features: data_sel_dict, sel_questions = ftsel_obj.select_num_features(data_ranked_dict, num, ranked_questions)", "[] # output directory path outdir = \"../results/run1/\" if(not os.path.isdir(outdir)):", "repeat = 1 #output dictrionary list list_output_dict = [] #", "data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres) for num in list_num_features: data_sel_dict,", "= [10,15,20] # decide this list_Kfold = [3,5] list_corr_threshold =", "in questions_int] elif(meth=='dt'): ftsel_obj = feature_selection.FeatureSelection( 
necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_dt\"", "column_names new_questions.remove('weight') return new_X_train, weights, new_questions if __name__ == \"__main__\":", "def main(): #parameter space list_test_size = [0.1,0.15,0.2] # decide this", "= data.PollDataProxy(remove_nan=False, convert_to_float=False) acc = [] '''refer to optimal_params.py. Functions", "the \\n off the end bad_questions.remove('weight') # need weight for", "run_name=\"test_chi2\" ) data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres) elif(meth=='mutlinfo'): ftsel_obj =", "X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=ts, shuffle=True) X_train, weights_train,", "= [] # output directory path outdir = \"../results/run1/\" if(not", "\"model_sel\" and output all the best parameters and values into", "unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_dt\" ) data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres) for", "y_test = model_selection.train_test_split(X, y, test_size=ts, shuffle=True) X_train, weights_train, questions =", "model with best test accuracy and return the output dict", "{ 'criterion': ['gini', 'entropy'], 'max_depth': [2, 3, 4, 5, 7],", "= all_data[:, -1] X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y,", "y_train, 'y_test': y_test } weights_dict = { 'weights_train': weights_train, 'weights_test':", "best_model_dict = list_output_dict[best_index] print(\"The best model parameters:\") print(best_model_dict) def get_bad_questions():", "plt import seaborn as sns def main(): #parameter space list_test_size", "y_test } weights_dict = { 'weights_train': weights_train, 'weights_test': weights_test} for", "= model_selection.train_test_split(X, y, test_size=ts, shuffle=True) X_train, weights_train, questions = separate_weights(X_train,", "elif(meth=='mutlinfo'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False, run_name=\"test_mutlinfo\" ) data_ranked_dict,", "from sklearn import model_selection, tree import data import feature_selection import", "= ftsel_obj.ftsel_mutlinfo(data_dict, thres) elif(meth=='pca'): ftsel_obj = feature_selection.FeatureSelection( necess_que_file=\"../extern/manage_data/list_all_questions.txt\", unnecess_que_file=\"../extern/manage_data/list_unnecessary_columns.txt\", bool_necess_que=False,", "param_space = { 'criterion': ['gini', 'entropy'], 'max_depth': [2, 3, 4,", "[] '''refer to optimal_params.py. Functions from this python scripts are", "end bad_questions.remove('weight') # need weight for training return bad_questions def", "10, 12, 15], } repeat = 1 #output dictrionary list", "a class onject of \"model_sel\" and output all the best", "print(\"The best model parameters:\") print(best_model_dict) def get_bad_questions(): f = open(\"../extern/manage_data/list_unnecessary_columns.txt\",", "= np.delete(X_train, weight_column_idx, axis=1) new_questions = column_names new_questions.remove('weight') return new_X_train,", "python scripts are transferred here. 
<reponame>tommy-waltmann/voting-ml<filename>voting_ml/main.py
import numpy as np
import sklearn
import subprocess
from sklearn import model_selection, tree
import data
import feature_selection
import model_sel
import os
import matplotlib.pyplot as plt
import seaborn as sns


def main():
    # parameter space
    list_test_size = [0.1, 0.15, 0.2]  # decide this
    list_ftsel_method = ['chi2', 'mutlinfo', 'pca', 'dt']
    list_num_features = [10, 15, 20]  # decide this
    list_Kfold = [3, 5]
    list_corr_threshold = [1, 0.5, 0.6, 0.7]  # decide this
    param_space = {
        'criterion': ['gini', 'entropy'],
        'max_depth': [2, 3, 4, 5, 7],
        'min_samples_split': [2, 5, 10],
        'min_samples_leaf': [2, 5, 10],
        'max_leaf_nodes': [2, 4, 6, 8, 10, 12, 15],
    }
    repeat = 1

    # output dictionary list
    list_output_dict = []

    # output directory path
    outdir = "../results/run1/"
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    o_models_file = open(outdir + "models.csv", "w")
    o_models_file.write("test size,run num,ftsel method,Kfold,number of features,correlation threshold,best features,criterion,max_depth,max_leaf_nodes,min_samples_leaf,min_samples_split,training accuracy,test accuracy\n")

    # splitting data and weights into train, test (refer to optimal_params.py)
    poll_data = data.PollDataProxy(remove_nan=False, convert_to_float=False)
    acc = []

    '''Refer to optimal_params.py. The functions get_bad_questions() and
    separate_weights() from that script are transferred here.'''
    for ts in list_test_size:
        for run_num in range(repeat):
            all_data, all_data_questions = poll_data.all_data_except(get_bad_questions())
            X = all_data[:, :-1]
            y = all_data[:, -1]
            X_train, X_test, y_train, y_test = model_selection.train_test_split(
                X, y, test_size=ts, shuffle=True)
            X_train, weights_train, questions = separate_weights(X_train, all_data_questions[:-1])
            X_test, weights_test, _ = separate_weights(X_test, all_data_questions[:-1])
            print("Number of Training Samples:", len(X_train))
            print("Number of Testing Samples:", len(X_test))
            data_dict = {'X_train': X_train,
                         'X_test': X_test,
                         'y_train': y_train,
                         'y_test': y_test}
            weights_dict = {'weights_train': weights_train,
                            'weights_test': weights_test}

            for meth in list_ftsel_method:
                '''Create class objects of the current selection method.'''
                for thres in list_corr_threshold:
                    data_ranked_dict, ranked_questions = {}, []
                    ftsel_obj = None
                    if meth == 'chi2':
                        ftsel_obj = feature_selection.FeatureSelection(
                            necess_que_file="../extern/manage_data/list_all_questions.txt",
                            unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
                            bool_necess_que=False,
                            run_name="test_chi2")
                        data_ranked_dict, ranked_questions = ftsel_obj.ftsel_chi2(data_dict, thres)
                    elif meth == 'mutlinfo':
                        ftsel_obj = feature_selection.FeatureSelection(
                            necess_que_file="../extern/manage_data/list_all_questions.txt",
                            unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
                            bool_necess_que=False,
                            run_name="test_mutlinfo")
                        data_ranked_dict, ranked_questions = ftsel_obj.ftsel_mutlinfo(data_dict, thres)
                    elif meth == 'pca':
                        ftsel_obj = feature_selection.FeatureSelection(
                            necess_que_file="../extern/manage_data/list_all_questions.txt",
                            unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
                            bool_necess_que=False,
                            run_name="test_pca")
                        data_ranked_dict, _ = ftsel_obj.ftsel_pca(data_dict)
                        # note: the original read data_sel_dict['X_train'] here, but
                        # data_sel_dict is not defined until the loop below;
                        # data_ranked_dict appears to be what was intended
                        fts = data_ranked_dict['X_train'].shape[1]
                        questions_int = list(map(str, list(range(1, fts + 1, 1))))
                        ranked_questions = ["ft_" + x for x in questions_int]
                    elif meth == 'dt':
                        ftsel_obj = feature_selection.FeatureSelection(
                            necess_que_file="../extern/manage_data/list_all_questions.txt",
                            unnecess_que_file="../extern/manage_data/list_unnecessary_columns.txt",
                            bool_necess_que=False,
                            run_name="test_dt")
                        data_ranked_dict, ranked_questions = ftsel_obj.ftsel_decision_tree_method(data_dict, thres)

                    for num in list_num_features:
                        data_sel_dict, sel_questions = ftsel_obj.select_num_features(
                            data_ranked_dict, num, ranked_questions)
                        ftsel_obj.plot_heatmap(data_sel_dict['X_train'], sel_questions)
                        for K in list_Kfold:
                            '''Here create a class object of "model_sel" and output all
                            the best parameters and values into "list_output_dict".
                            Then, can create a .csv file to list all the models and
                            accuracies.'''
                            model_obj = model_sel.model_sel(ts, run_num, meth, param_space,
                                                            K, num, thres, data_sel_dict,
                                                            weights_dict, sel_questions,
                                                            outdir).select_model()
                            # intermediate = model_obj.select_model()
                            acc.append(model_obj['test_acc'])
                            o_models_file.write(str(ts) + ",")
                            o_models_file.write(str(run_num) + ",")
                            o_models_file.write(meth + ",")
                            o_models_file.write(str(K) + ",")
                            o_models_file.write(str(num) + ",")
                            o_models_file.write(str(thres) + ",")
                            for ii in range(len(model_obj['best_features'])):
                                o_models_file.write(model_obj['best_features'][ii] + " ")
                            o_models_file.write(",")
                            o_models_file.write(model_obj['best_params']['criterion'] + ",")
                            o_models_file.write(str(model_obj['best_params']['max_depth']) + ",")
                            o_models_file.write(str(model_obj['best_params']['max_leaf_nodes']) + ",")
                            o_models_file.write(str(model_obj['best_params']['min_samples_leaf']) + ",")
                            o_models_file.write(str(model_obj['best_params']['min_samples_split']) + ",")
                            o_models_file.write(str(model_obj['train_acc']) + ",")
                            o_models_file.write(str(model_obj['test_acc']) + ",")
                            o_models_file.write("\n")
                            list_output_dict.append(model_obj)

    '''Once all the models are run, select the model with the best test
    accuracy and return the output dict for that model.'''
    o_models_file.close()
    best_index = np.argmax(acc)
    best_model_dict = list_output_dict[best_index]
    print("The best model parameters:")
    print(best_model_dict)


def get_bad_questions():
    f = open("../extern/manage_data/list_unnecessary_columns.txt", 'r')
    bad_questions = f.readline().split(',')
    bad_questions[-1] = bad_questions[-1][:-1]  # chop the \n off the end
    bad_questions.remove('weight')  # need weight for training
    return bad_questions


def separate_weights(X_train, column_names):
    """
    Removes the column containing weights from X_train, and
    returns it as a separate array.
    """
    weight_column_idx = column_names.index('weight')
    weights = X_train[:, weight_column_idx]
    new_X_train = np.delete(X_train, weight_column_idx, axis=1)
    new_questions = column_names
    new_questions.remove('weight')
    return new_X_train, weights, new_questions


if __name__ == "__main__":
    main()
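# --- editor's note (not part of voting_ml/main.py) -----------------------
# A minimal, self-contained sketch of what separate_weights() above does,
# using a toy 3-column matrix whose middle column is the 'weight' column.
# The array values and column names here are made up for illustration.
import numpy as np

def _demo_separate_weights():
    X = np.array([[1.0, 0.5, 10.0],
                  [2.0, 0.7, 20.0]])
    columns = ['q1', 'weight', 'q2']
    idx = columns.index('weight')          # index of the weight column -> 1
    weights = X[:, idx]                    # extracted weights -> [0.5, 0.7]
    X_without = np.delete(X, idx, axis=1)  # remaining features -> [[1, 10], [2, 20]]
    assert list(weights) == [0.5, 0.7]
    assert X_without.shape == (2, 2)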
[ "self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0)", "is_fast=False) self.attributes = {'is_fast': False, 'is_bot': False, 'might': 0, 'active_state_end_at':", "logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name,", "self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power',", "= self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla')", "= game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda", "place: True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50) impacts", "attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE,", "self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)", "@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100)", "self.assertEqual(impacts[0].amount, 150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self): super().setUp() self.places =", "self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self):", "self.places = game_logic.create_test_map() account = self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account)", "self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def test_habits__peacefulness(self): for place_direction,", "self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self):", "self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150) class 
GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self):", "super().setUp() game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes = {'is_fast':", "self.places[0].refresh_attributes() HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0), (-1,", "-1, 0), ( 0, 0, 0), ( 0, +1, 0),", "attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender':", "def setUp(self): super().setUp() game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes", "self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def", "CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False)", "random.choice((True, False)), 'might': random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at':", "super().setUp() game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero", "hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_peacefulness.set_habit(place_direction", "for attribute in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value for", "attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race':", "0, 'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def", "self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power__below_zero(self):", "self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self):", "self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100) impacts = 
game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])", "'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER,", "self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor'])", "test_habits__honor(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with", "tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender'])", "self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def", "game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes = {'is_fast': False,", "'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new description')", "-c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0])", "False, 'is_bot': False, 'might': 0, 'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3), 'premium_state_end_at':", "HeroDescriptionTests(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage =", "place: True) def test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100)", "self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self):", "-c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0)", "False)), 'is_bot': random.choice((True, False)), 'might': random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at':", "= game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) 
@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False)", "game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),", "self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero,", "-c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0])", "self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def", "self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map() account", "self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(),", "email='<EMAIL>', is_fast=False) self.attributes = {'is_fast': False, 'is_bot': False, 'might': 0,", "100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def", "with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):", "def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts),", "0, +1, 0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0), (+1,", "'might': random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id,", "setUp(self): super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage =", "tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id)", "self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)", "setUp(self): super().setUp() 
self.places = game_logic.create_test_map() account = self.accounts_factory.create_account() self.storage =", "place: False) def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE,", "attributes = {'is_fast': random.choice((True, False)), 'is_bot': random.choice((True, False)), 'might': random.randint(1,", "self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS = [(-1,", "self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype,", "hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction", "game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def", "self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def place_0_cost(self):", "test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def test_enemy(self): with self.check_almost_delta(self.place_0_cost,", "self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death,", "'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.race,", "delta=0) self.places[0].refresh_attributes() HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0),", "def setUp(self): super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage", "10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX,", "c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS = [(-1, -1,", "= self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, 
-c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id,", "'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(),", "delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0])", "datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero = logic.load_hero(self.account.id)", "setUp(self): super().setUp() game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account)", "attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def", "True) def test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100) impacts", "self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def", "<reponame>al-arz/the-tale import smart_imports smart_imports.all() class HeroDescriptionTests(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map()", "self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda", "self.hero = self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers", "= logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at'])", "'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero =", "c.HABITS_BORDER) def test_habits__peacefulness(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0)", "hero, place: False) def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts =", "expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) 
self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_peacefulness.set_habit(place_direction *", "'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype':", "self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value,", "self.assertEqual(set(modifiers.keys()), {place.id for place in self.places}) def test_race_bonus(self): self.places[0].race =", "def test_habits__peacefulness(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0)", "True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50) impacts =", "self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def test_habits__peacefulness(self): for place_direction, hero_direction, expected_delta in", "def test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for place in", "datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id)", "test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE,", "def test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def test_enemy(self): with", "0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)]", "return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for", "self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value,", "logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for place", "attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()),", "test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = 
logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id)", "self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for attribute in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError):", "{'is_fast': False, 'is_bot': False, 'might': 0, 'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3),", "self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing,", "random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death':", "self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def test_no_description(self):", "{'is_fast': random.choice((True, False)), 'is_bot': random.choice((True, False)), 'might': random.randint(1, 1000), 'active_state_end_at':", "+c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1, 0), ( 0, 0, 0), (", "= game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero", "hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def", "for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost,", "game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_not_in_place(self): self.hero.position.set_position(0, 0)", "self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class", "with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active())", "in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER)", "self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) 
self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def", "game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero,", "logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1)", "self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_not_in_place(self): self.hero.position.set_position(0,", "100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE)", "game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active())", "self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with", "c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def test_habits__peacefulness(self): for place_direction, hero_direction, expected_delta", "with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):", "self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_not_in_place(self):", "self.hero = self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self): logic.set_hero_description(self.hero.id,", "super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage()", "def test_tax(self): self.places[0].attrs.size = 10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost,", "= self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def", "place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta):", "datetime.datetime.now() + datetime.timedelta(days=3), 
'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id,", "hero, place: True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50)", "self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for attribute", "= logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type,", "self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing'])", "0, 0), ( 0, +1, 0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1,", "False, 'might': 0, 'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at':", "self.attributes.items() if key != attribute }) def test_account_attributes(self): attributes =", "self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at'])", "tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race'])", "self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size = 10 self.places[0].refresh_attributes()", "+1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1, 0), ( 0, 0, 0),", "test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1)", "logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100)", "'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero =", "self.places[0]) def test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, 
self.places[0].persons[0]) def test_enemy(self):", "= game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id]", "def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla')", "0, 0, 0), ( 0, +1, 0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA),", "attribute in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value for key,", "smart_imports.all() class HeroDescriptionTests(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True)", "account = self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id]", "lambda hero, place: True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero,", "'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.id,", "self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp() self.places =", "1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero", "self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction", "self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False) def test_can_not_change_place_power(self):", "hero = logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE))", "test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race", "'bla-bla') logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class CreateHero(utils_testcase.TestCase): def", "impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place:", "self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size = 10 self.places[0].refresh_attributes() 
self.assertEqual(self.places[0].attrs.tax, 0) with", "0, -1, 0), ( 0, 0, 0), ( 0, +1,", "self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0])", "-1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self):", "hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at,", "@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100)", "0, 0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1, 0), (", "= self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers =", "test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id,", "+c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for", "test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER,", "def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts,", "game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero =", "= game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race def test_modifier_bonus(self):", "-c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1,", "+ datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes)", "= 10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100,", "def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness':", "1) self.assertEqual(impacts[0].amount, 150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self): 
super().setUp() self.places", "setUp(self): super().setUp() game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes =", "def test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size", "accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes = {'is_fast': False, 'is_bot': False, 'might':", "class HeroDescriptionTests(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage", "logic.create_hero(account_id=self.account.id, attributes={key: value for key, value in self.attributes.items() if key", "random.choice((True, False)), 'is_bot': random.choice((True, False)), 'might': random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1),", "def test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100) impacts =", "self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, [])", "-c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self):", "impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id,", "(-1, 0, 0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1, 0),", "datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot'])", "self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self):", "self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death'])", "self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase):", "self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) 
self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age,", "self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp() self.places", "place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id", "@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False) def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100)", "game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor':", "datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero", "= {'is_fast': False, 'is_bot': False, 'might': 0, 'active_state_end_at': datetime.datetime.now() +", "hero = logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value,", "self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(),", "self.places[0].persons[0]) def test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self):", "def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()),", "class CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>',", "'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new", "self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero)", "'is_bot': random.choice((True, False)), 'might': random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2),", "for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) 
self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost,", "in self.places}) def test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):", "0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1, 0), ( 0,", "self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() self.account", "self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self):", "None) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power',", "+1, 0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0), (+1, +1,", "self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self): super().setUp()", "= game_logic.create_test_map() account = self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero", "self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return", "self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0)", "def test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race =", "self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name': game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER),", "'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender,", "self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id),", "-50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150) class", "self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map()", "attributes=attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) 
self.assertEqual(hero.might, attributes['might'])", "1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place:", "GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map() account =", "random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes)", "game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False) def", "self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id),", "self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at'])", "attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(), 'gender': game_relations.GENDER.random(), 'name':", "( 0, 0, 0), ( 0, +1, 0), (+1, -1,", "datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id)", "with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def", "place: True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE,", "self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction *", "smart_imports smart_imports.all() class HeroDescriptionTests(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() account =", "= self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def", "self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value for key, value in", "self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False) def test_can_not_change_place_power(self): 
self.hero.position.set_place(self.places[0])", "self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND,", "'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero =", "'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age':", "c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size = 10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax,", "attributes={key: value for key, value in self.attributes.items() if key !=", "0), ( 0, 0, 0), ( 0, +1, 0), (+1,", "= game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id)", "150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map()", "value for key, value in self.attributes.items() if key != attribute", "game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def", "self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes = {'is_fast': False, 'is_bot':", "game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS)", "self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for attribute in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id,", "= game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True)", "self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service() @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place:", "hero, place: True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts =", "self.attributes = {'is_fast': False, 'is_bot': False, 'might': 0, 'active_state_end_at': datetime.datetime.now()", 
"self.places = game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account)", "in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value for key, value", "test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0)", "logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender,", "logic.register_spending(self.hero, -50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150)", "(-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0, -1, 0), ( 0, 0,", "test_tax(self): self.places[0].attrs.size = 10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA):", "key, value in self.attributes.items() if key != attribute }) def", "self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER)", "def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT,", "(+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for place_direction, hero_direction, expected_delta in", "game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing':", "class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map() account =", "False) def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])", "game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def place_0_cost(self): return logic.get_places_path_modifiers(self.hero)[self.places[0].id] def", "value in self.attributes.items() if key != attribute }) def test_account_attributes(self):", "c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id,", "self.assertEqual(hero.preferences.energy_regeneration_type, 
hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE)", "test_every_place_has_modifier(self): modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for place in self.places})", "= logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for place in self.places}) def test_race_bonus(self):", "test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size =", "self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_peacefulness.set_habit(hero_direction", "expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction *", "+1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:", "datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast'])", "True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])", "self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost,", "key != attribute }) def test_account_attributes(self): attributes = {'is_fast': random.choice((True,", "( 0, +1, 0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0),", "account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id]", "test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class", "for place in self.places}) def test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with", "with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS =", "logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class 
CreateHero(utils_testcase.TestCase):", "game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 100) self.assertTrue(impacts[0].target_type.is_PLACE) self.assertEqual(impacts[0].target_id, self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power',", "self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0])", "def setUp(self): super().setUp() game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage()", "False)), 'might': random.randint(1, 1000), 'active_state_end_at': datetime.datetime.fromtimestamp(1), 'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)}", "lambda hero, place: True) def test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None)", "'premium_state_end_at': datetime.datetime.fromtimestamp(2), 'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast,", "def setUp(self): super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account() self.storage", "!= attribute }) def test_account_attributes(self): attributes = {'is_fast': random.choice((True, False)),", "self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for attribute in self.attributes.keys():", "game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def", "place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta):", "def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla')", "* c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def test_habits__peacefulness(self): for place_direction, hero_direction,", "c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(),", "utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True)", "'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp()", "for key, value in self.attributes.items() if key != attribute 
})", "attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at,", "self.places[0].race = self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT)", "self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA),", "(+1, 0, 0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for place_direction,", "0) with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS", "description') class CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() self.account = accounts_prototypes.AccountPrototype.create(nick='nick-xxx',", "'new description') class CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() self.account =", "self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power',", "logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration)", "self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at game_tt_services.debug_clear_service()", "-c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA):", "self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype'])", "import smart_imports smart_imports.all() class HeroDescriptionTests(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map() account", "'') def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id,", "}) def test_account_attributes(self): attributes = {'is_fast': random.choice((True, False)), 'is_bot': random.choice((True,", "game_relations.ARCHETYPE.random(), 'upbringing': 
tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero", "test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, [])", "'might': 0, 'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)}", "self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500,", "= logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness'])", "logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for place in self.places}) def test_race_bonus(self): self.places[0].race", "game_logic.create_test_map() account = self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero =", "= [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA),", "self.account.id) self.assertEqual(hero.account_id, self.account.id) self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0)", "self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_peacefulness.set_habit(hero_direction *", "logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might, attributes['might']) self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at,", "logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda", "self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts),", "test_habits__peacefulness(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_peacefulness.set_habit(0) self.hero.habit_peacefulness.set_habit(0) with", "value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() 
self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA):", "100) logic.register_spending(self.hero, -50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount,", "self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id),", "self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.FRIEND, self.places[0].persons[0]) def test_enemy(self): with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY,", "test_account_attributes_required(self): for attribute in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value", "modifiers = logic.get_places_path_modifiers(self.hero) self.assertEqual(set(modifiers.keys()), {place.id for place in self.places}) def", "self.assertFalse(self.places[0].is_modifier_active()) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].set_modifier(places_modifiers.CITY_MODIFIERS.FORT) self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes()", "def test_habits__honor(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0)", "def test_default(self): logic.create_hero(account_id=self.account.id, attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.id, self.account.id) self.assertEqual(hero.account_id,", "attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE,", "0, 0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for place_direction, hero_direction,", "= self.accounts_factory.create_account() self.storage = game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] self.hero.premium_state_end_at", "def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description')", "(game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE)", "hero, place: True) def test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero,", 
"logic.create_hero(account_id=self.account.id, attributes=attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot, attributes['is_bot']) self.assertEqual(hero.might,", "-1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), ( 0,", "in self.attributes.items() if key != attribute }) def test_account_attributes(self): attributes", "( 0, -1, 0), ( 0, 0, 0), ( 0,", "def test_account_attributes_required(self): for attribute in self.attributes.keys(): with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key:", "'ban_state_end_at': datetime.datetime.fromtimestamp(3)} logic.create_hero(account_id=self.account.id, attributes=attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.is_fast, attributes['is_fast']) self.assertEqual(hero.is_bot,", "[(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0), (-1, +1, +c.PATH_MODIFIER_MINOR_DELTA), (", "RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account()", "0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def test_habits__honor(self): for place_direction, hero_direction, expected_delta", "= accounts_prototypes.AccountPrototype.create(nick='nick-xxx', email='<EMAIL>', is_fast=False) self.attributes = {'is_fast': False, 'is_bot': False,", "def test_home_place(self): with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self): with", "self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self):", "logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name']) self.assertEqual(hero.habit_peacefulness.raw_value, self.attributes['peacefulness']) self.assertEqual(hero.habit_honor.raw_value,", "[]) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: False) def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero,", "attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0,", "@mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_not_in_place(self): self.hero.position.set_position(0, 0) self.assertEqual(self.hero.position.place_id,", "attribute }) def test_account_attributes(self): attributes = {'is_fast': random.choice((True, False)), 'is_bot':", "self.create_effect(self.places[0].id, value=100500, attribute=places_relations.ATTRIBUTE.MODIFIER_FORT, delta=0) self.places[0].refresh_attributes() self.assertTrue(self.places[0].is_modifier_active()) def test_home_place(self): with self.check_almost_delta(self.place_0_cost,", "self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size = 10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0)", "test_home_place(self): with 
self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.PLACE, self.places[0]) def test_friend(self): with self.check_almost_delta(self.place_0_cost,", "impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin,", "'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()}) logic.create_hero(account_id=self.account.id, attributes=self.attributes)", "with self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value for key, value in self.attributes.items()", "class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase): def setUp(self): super().setUp() self.places = game_logic.create_test_map() account", "self.places}) def test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race", "* c.HABITS_BORDER) def test_habits__peacefulness(self): for place_direction, hero_direction, expected_delta in self.HABITS_DELTAS:", "def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE,", "logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def test_update_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') logic.set_hero_description(self.hero.id, 'new", "self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power(self):", "random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(), 'upbringing': tt_beings_relations.UPBRINGING.random(), 'first_death': tt_beings_relations.FIRST_DEATH.random(), 'death_age': tt_beings_relations.AGE.random()})", "in self.HABITS_DELTAS: self.places[0].habit_honor.set_habit(0) self.hero.habit_honor.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER)", "place in self.places}) def test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost,", "self.assertRaises(exceptions.HeroAttributeRequiredError): logic.create_hero(account_id=self.account.id, attributes={key: value for key, value in self.attributes.items() if", "100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts, []) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero,", "self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def 
test_habits__peacefulness(self):", "description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class CreateHero(utils_testcase.TestCase): def setUp(self): super().setUp() game_logic.create_test_map()", "self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,)) with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race def", "expected_delta): self.places[0].habit_honor.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_honor.set_habit(hero_direction * c.HABITS_BORDER) def test_habits__peacefulness(self): for", "def test_account_attributes(self): attributes = {'is_fast': random.choice((True, False)), 'is_bot': random.choice((True, False)),", "test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '') def test_has_description(self): logic.set_hero_description(self.hero.id, 'bla-bla') self.assertEqual(logic.get_hero_description(self.hero.id), 'bla-bla') def", "test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) logic.register_spending(self.hero, -50) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)])", "lambda hero, place: True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts", "self.hero.habit_peacefulness.set_habit(0) with self.check_almost_delta(self.place_0_cost, expected_delta): self.places[0].habit_peacefulness.set_habit(place_direction * c.HABITS_BORDER) self.hero.habit_peacefulness.set_habit(hero_direction * c.HABITS_BORDER)", "test_account_attributes(self): attributes = {'is_fast': random.choice((True, False)), 'is_bot': random.choice((True, False)), 'might':", "game_names.generator().get_name(game_relations.RACE.random(), game_relations.GENDER.random()), 'peacefulness': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'honor': random.randint(-c.HABITS_BORDER, c.HABITS_BORDER), 'archetype': game_relations.ARCHETYPE.random(),", "'is_bot': False, 'might': 0, 'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0),", "= game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount, 150) class GetPlacesPathModifiersTests(places_helpers.PlacesTestsMixin, utils_testcase.TestCase):", "(+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0, 0), (+1, +1, -c.PATH_MODIFIER_MINOR_DELTA)] def", "super().setUp() self.places = game_logic.create_test_map() account = self.accounts_factory.create_account(is_fast=True) self.storage = game_logic_storage.LogicStorage()", "self.assertIn(hero.gender, (game_relations.GENDER.MALE, game_relations.GENDER.FEMALE)) self.assertEqual(hero.preferences.energy_regeneration_type, hero.race.energy_regeneration) self.assertEqual(hero.habit_honor.raw_value, 0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL)", "with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.hero.preferences.set(relations.PREFERENCE_TYPE.ENEMY, self.places[0].persons[0]) def test_tax(self): self.places[0].attrs.size = 10", "self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost, 
c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id, value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes()", "self.assertEqual(hero.habit_honor.raw_value, self.attributes['honor']) self.assertEqual(hero.preferences.archetype, self.attributes['archetype']) self.assertEqual(hero.upbringing, self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age'])", "self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for attribute in self.attributes.keys(): with", "self.attributes['upbringing']) self.assertEqual(hero.first_death, self.attributes['first_death']) self.assertEqual(hero.death_age, self.attributes['death_age']) class RegisterSpendingTests(utils_testcase.TestCase): def setUp(self): super().setUp()", "'active_state_end_at': datetime.datetime.now() + datetime.timedelta(days=3), 'premium_state_end_at': datetime.datetime.fromtimestamp(0), 'ban_state_end_at': datetime.datetime.fromtimestamp(0)} def test_default(self):", "HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1, 0, 0), (-1, +1,", "if key != attribute }) def test_account_attributes(self): attributes = {'is_fast':", "with self.check_almost_delta(self.place_0_cost, -c.PATH_MODIFIER_MINOR_DELTA): self.places[0].race = self.hero.race def test_modifier_bonus(self): self.assertFalse(self.places[0].is_modifier_active()) with", "value=100, attribute=places_relations.ATTRIBUTE.TAX, delta=0) self.places[0].refresh_attributes() HABITS_DELTAS = [(-1, -1, -c.PATH_MODIFIER_MINOR_DELTA), (-1,", "0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for attribute in", "[]) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero,", "{place.id for place in self.places}) def test_race_bonus(self): self.places[0].race = game_relations.RACE.random(exclude=(self.hero.race,))", "0), ( 0, +1, 0), (+1, -1, +c.PATH_MODIFIER_MINOR_DELTA), (+1, 0,", "lambda hero, place: False) def test_can_not_change_place_power(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts", "attributes=self.attributes) hero = logic.load_hero(self.account.id) self.assertEqual(hero.race, self.attributes['race']) self.assertEqual(hero.gender, self.attributes['gender']) self.assertEqual(hero.utg_name, self.attributes['name'])", "self.assertEqual(hero.active_state_end_at, attributes['active_state_end_at']) self.assertEqual(hero.premium_state_end_at, attributes['premium_state_end_at']) self.assertEqual(hero.ban_state_end_at, attributes['ban_state_end_at']) def test_attributes(self): self.attributes.update({'race': game_relations.RACE.random(),", "logic.set_hero_description(self.hero.id, 'new description') self.assertEqual(logic.get_hero_description(self.hero.id), 'new description') class CreateHero(utils_testcase.TestCase): def setUp(self):", "= {'is_fast': random.choice((True, False)), 'is_bot': random.choice((True, False)), 'might': random.randint(1, 1000),", 
"self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(len(impacts), 1) self.assertEqual(impacts[0].amount,", "0) self.assertEqual(hero.habit_peacefulness.raw_value, 0) self.assertTrue(hero.preferences.archetype.is_NEUTRAL) self.assertTrue(hero.upbringing.is_PHILISTINE) self.assertTrue(hero.first_death.is_FROM_THE_MONSTER_FANGS) self.assertTrue(hero.death_age.is_MATURE) def test_account_attributes_required(self): for", "= game_logic_storage.LogicStorage() self.storage.load_account_data(account) self.hero = self.storage.accounts_to_heroes[account.id] def test_no_description(self): self.assertEqual(logic.get_hero_description(self.hero.id), '')", "0) self.assertEqual(self.hero.position.place_id, None) logic.register_spending(self.hero, 100) impacts = game_tt_services.money_impacts.cmd_get_targets_impacts(targets=[(tt_api_impacts.OBJECT_TYPE.PLACE, self.places[0].id)]) self.assertEqual(impacts,", "self.places[0].id) @mock.patch('the_tale.game.heroes.objects.Hero.can_change_place_power', lambda hero, place: True) def test_can_change_place_power__below_zero(self): self.hero.position.set_place(self.places[0]) logic.register_spending(self.hero,", "self.places[0].attrs.size = 10 self.places[0].refresh_attributes() self.assertEqual(self.places[0].attrs.tax, 0) with self.check_almost_delta(self.place_0_cost, c.PATH_MODIFIER_NORMAL_DELTA): self.create_effect(self.places[0].id," ]
[ "information from __future__ import absolute_import from __future__ import division from", "The code is released under the MIT Licence. # See", "y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\") self.d", "See LICENCE.txt and the Legal section in the README for", "= self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05", "released under the MIT Licence. # See LICENCE.txt and the", "import division from __future__ import print_function from .postcipe import Postcipe", "= iso05[idx, 1] idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)])) self.xtoe =", "__all__ = [\"HydraulicJump\"] class HydraulicJump(Postcipe): def __init__(self, path): Postcipe.__init__(self) self.case", "1] inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\") self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1]", "idx = iso05[:, 0].argsort() self.xfs = iso05[idx, 0] self.yfs =", "absolute_import from __future__ import division from __future__ import print_function from", "= tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx = iso05[:, 0].argsort() self.xfs =", "self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2)", "print_function from .postcipe import Postcipe import turbulucid as tbl from", "iso05[:, 0].argsort() self.xfs = iso05[idx, 0] self.yfs = iso05[idx, 1]", "postcipes # (c) <NAME> # The code is released under", "# This file is part of postcipes # (c) <NAME>", "- 1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case, \"alpha.waterMean\", 0.5)", "import Postcipe import turbulucid as tbl from scipy.interpolate import interp1d", "code is released under the MIT Licence. # See LICENCE.txt", "__future__ import division from __future__ import print_function from .postcipe import", "+ 0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2)", "= self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx = iso05[:,", "= iso05[idx, 0] self.yfs = iso05[idx, 1] idx_toe = np.argmin(np.abs(self.d*1.1", "\"alpha.waterMean\", 0.5) idx = iso05[:, 0].argsort() self.xfs = iso05[idx, 0]", "LICENCE.txt and the Legal section in the README for more", "README for more information from __future__ import absolute_import from __future__", "from .postcipe import Postcipe import turbulucid as tbl from scipy.interpolate", "self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2 =", "= self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2 self.Fr2", "iso05[idx, 0] self.yfs = iso05[idx, 1] idx_toe = np.argmin(np.abs(self.d*1.1 -", "for more information from __future__ import absolute_import from __future__ import", "class HydraulicJump(Postcipe): def __init__(self, path): Postcipe.__init__(self) self.case = tbl.Case(path) self.case['alphag']", "from scipy.interpolate import interp1d import numpy as np import h5py", "def __init__(self, path): Postcipe.__init__(self) self.case = tbl.Case(path) self.case['alphag'] = 1", "is released under the MIT Licence. 
# See LICENCE.txt and", "0] y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\")", "self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\") self.d = y_inlet[-1]", "0.5) idx = iso05[:, 0].argsort() self.xfs = iso05[idx, 0] self.yfs", "self.case = tbl.Case(path) self.case['alphag'] = 1 - self.case['alpha.waterMean'] self.U =", "# See LICENCE.txt and the Legal section in the README", "= self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length", "file is part of postcipes # (c) <NAME> # The", "= tbl.Case(path) self.case['alphag'] = 1 - self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\",", "more information from __future__ import absolute_import from __future__ import division", "self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05 =", "+ 8*self.Fr1**2) - 1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case,", "tbl from scipy.interpolate import interp1d import numpy as np import", "tbl.Case(path) self.case['alphag'] = 1 - self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0,", "the Legal section in the README for more information from", "of postcipes # (c) <NAME> # The code is released", "section in the README for more information from __future__ import", "Postcipe.__init__(self) self.case = tbl.Case(path) self.case['alphag'] = 1 - self.case['alpha.waterMean'] self.U", "Legal section in the README for more information from __future__", "1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx", "<NAME> # The code is released under the MIT Licence.", "numpy as np import h5py __all__ = [\"HydraulicJump\"] class HydraulicJump(Postcipe):", "self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:,", "self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1]", "import interp1d import numpy as np import h5py __all__ =", "from __future__ import division from __future__ import print_function from .postcipe", "\"inlet\") self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2", "iso05 = tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx = iso05[:, 0].argsort() self.xfs", "0].argsort() self.xfs = iso05[idx, 0] self.yfs = iso05[idx, 1] idx_toe", "iso05[idx, 1] idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)])) self.xtoe = self.xfs[idx_toe]", "from __future__ import absolute_import from __future__ import division from __future__", "path): Postcipe.__init__(self) self.case = tbl.Case(path) self.case['alphag'] = 1 - self.case['alpha.waterMean']", "turbulucid as tbl from scipy.interpolate import interp1d import numpy as", "= tbl.edge_lengths(self.case, \"inlet\") self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1 =", "from __future__ import print_function from .postcipe import Postcipe import turbulucid", "Licence. 
# See LICENCE.txt and the Legal section in the", "# (c) <NAME> # The code is released under the", "HydraulicJump(Postcipe): def __init__(self, path): Postcipe.__init__(self) self.case = tbl.Case(path) self.case['alphag'] =", "0] self.yfs = iso05[idx, 1] idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)]))", "[\"HydraulicJump\"] class HydraulicJump(Postcipe): def __init__(self, path): Postcipe.__init__(self) self.case = tbl.Case(path)", "= 1 - self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet", "__init__(self, path): Postcipe.__init__(self) self.case = tbl.Case(path) self.case['alphag'] = 1 -", "part of postcipes # (c) <NAME> # The code is", "interp1d import numpy as np import h5py __all__ = [\"HydraulicJump\"]", "Postcipe import turbulucid as tbl from scipy.interpolate import interp1d import", "self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length =", "sort=\"y\")[0][:, 1] inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\") self.d = y_inlet[-1] +", "import numpy as np import h5py __all__ = [\"HydraulicJump\"] class", "under the MIT Licence. # See LICENCE.txt and the Legal", "tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx = iso05[:, 0].argsort() self.xfs = iso05[idx,", "This file is part of postcipes # (c) <NAME> #", "self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2 self.Fr2 =", "__future__ import absolute_import from __future__ import division from __future__ import", "# The code is released under the MIT Licence. #", "- self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet = self.case.boundary_data(\"inlet\",", "import print_function from .postcipe import Postcipe import turbulucid as tbl", "(c) <NAME> # The code is released under the MIT", "the MIT Licence. # See LICENCE.txt and the Legal section", "scipy.interpolate import interp1d import numpy as np import h5py __all__", "tbl.edge_lengths(self.case, \"inlet\") self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d)", "np import h5py __all__ = [\"HydraulicJump\"] class HydraulicJump(Postcipe): def __init__(self,", "and the Legal section in the README for more information", "division from __future__ import print_function from .postcipe import Postcipe import", "import h5py __all__ = [\"HydraulicJump\"] class HydraulicJump(Postcipe): def __init__(self, path):", "MIT Licence. 
# See LICENCE.txt and the Legal section in", "sort=\"y\")[1]['UMean'][0, 0] y_inlet = self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length = tbl.edge_lengths(self.case,", "as tbl from scipy.interpolate import interp1d import numpy as np", "1 - self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0] y_inlet =", "self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) - 1)/2", "0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1 + 8*self.Fr1**2) -", "__future__ import print_function from .postcipe import Postcipe import turbulucid as", "y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1 +", "self.case['alphag'] = 1 - self.case['alpha.waterMean'] self.U = self.case.boundary_data(\"inlet\", sort=\"y\")[1]['UMean'][0, 0]", "self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx =", "self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case, \"alpha.waterMean\", 0.5) idx = iso05[:, 0].argsort()", "h5py __all__ = [\"HydraulicJump\"] class HydraulicJump(Postcipe): def __init__(self, path): Postcipe.__init__(self)", "= iso05[:, 0].argsort() self.xfs = iso05[idx, 0] self.yfs = iso05[idx,", "the README for more information from __future__ import absolute_import from", "in the README for more information from __future__ import absolute_import", ".postcipe import Postcipe import turbulucid as tbl from scipy.interpolate import", "= self.case.boundary_data(\"inlet\", sort=\"y\")[0][:, 1] inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\") self.d =", "8*self.Fr1**2) - 1)/2 self.Fr2 = self.U/np.sqrt(9.81*self.d2) iso05 = tbl.isoline(self.case, \"alpha.waterMean\",", "= [\"HydraulicJump\"] class HydraulicJump(Postcipe): def __init__(self, path): Postcipe.__init__(self) self.case =", "self.yfs = iso05[idx, 1] idx_toe = np.argmin(np.abs(self.d*1.1 - self.yfs[:int(self.yfs.size/2)])) self.xtoe", "import absolute_import from __future__ import division from __future__ import print_function", "= y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1 = self.U/np.sqrt(9.81*self.d) self.d2 = self.d*(np.sqrt(1", "import turbulucid as tbl from scipy.interpolate import interp1d import numpy", "is part of postcipes # (c) <NAME> # The code", "inlet_edge_length = tbl.edge_lengths(self.case, \"inlet\") self.d = y_inlet[-1] + 0.5*inlet_edge_length[-1] self.Fr1", "as np import h5py __all__ = [\"HydraulicJump\"] class HydraulicJump(Postcipe): def", "self.xfs = iso05[idx, 0] self.yfs = iso05[idx, 1] idx_toe =" ]
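For orientation, a minimal usage sketch for the class above; the case path is a placeholder and assumes a case directory that turbulucid can load:

# Hypothetical usage; "path/to/case" stands in for a real turbulucid case.
jump = HydraulicJump("path/to/case")

print("upstream Froude number Fr1:", jump.Fr1)
print("sequent depth d2 (Belanger):", jump.d2)
print("jump toe position x:", jump.xtoe)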
[ "__init__(self, frequency=10): SteppableBasePy.__init__(self, frequency) def step(self, mcs): if mcs in", "def __init__(self, frequency=10): SteppableBasePy.__init__(self, frequency) def step(self, mcs): if mcs", "step(self, mcs): if mcs in [3, 5, 19,20, 23, 29,", "def step(self, mcs): if mcs in [3, 5, 19,20, 23,", "if mcs in [3, 5, 19,20, 23, 29, 31]: self.request_screenshot(mcs=mcs,", "from cc3d.core.PySteppables import * from cc3d import CompuCellSetup from random", "import * from cc3d import CompuCellSetup from random import random", "mcs in [3, 5, 19,20, 23, 29, 31]: self.request_screenshot(mcs=mcs, screenshot_label='Cell_Field_CellField_2D_XY_0')", "import CompuCellSetup from random import random class ScreenshotSteppable(SteppableBasePy): def __init__(self,", "* from cc3d import CompuCellSetup from random import random class", "class ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10): SteppableBasePy.__init__(self, frequency) def step(self, mcs):", "random class ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10): SteppableBasePy.__init__(self, frequency) def step(self,", "mcs): if mcs in [3, 5, 19,20, 23, 29, 31]:", "cc3d import CompuCellSetup from random import random class ScreenshotSteppable(SteppableBasePy): def", "frequency=10): SteppableBasePy.__init__(self, frequency) def step(self, mcs): if mcs in [3,", "from random import random class ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10): SteppableBasePy.__init__(self,", "random import random class ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10): SteppableBasePy.__init__(self, frequency)", "import random class ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10): SteppableBasePy.__init__(self, frequency) def", "CompuCellSetup from random import random class ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10):", "cc3d.core.PySteppables import * from cc3d import CompuCellSetup from random import", "SteppableBasePy.__init__(self, frequency) def step(self, mcs): if mcs in [3, 5,", "from cc3d import CompuCellSetup from random import random class ScreenshotSteppable(SteppableBasePy):", "frequency) def step(self, mcs): if mcs in [3, 5, 19,20,", "ScreenshotSteppable(SteppableBasePy): def __init__(self, frequency=10): SteppableBasePy.__init__(self, frequency) def step(self, mcs): if" ]
[ "f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name, local_opt,", "\"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\"", "optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\", \"gpuarray\", *tags, )", "name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ if isinstance(local_opt, OptimizationDatabase):", "EquilibriumOptimizer by calling the method query. \"\"\" def query(self, *tags,", "\"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the list local optimizers", "EquilibriumDB() # Not used for an EquilibriumOptimizer. It has the", "extra parameter(Op) compared to register_opt decorator. Parameters ---------- tracks :", "= local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return", "= EquilibriumDB() gpu_seqopt = SequenceDB() # do not add 'fast_run'", "*tags, **kwtags): from aesara.gpuarray.opt import GraphToGPU opt = gpu_optimizer2.query(*tags, **kwtags)", "decorator. Parameters ---------- tracks : List of Op class Or", "They are tried in a specific order so we can", "optimizer. Takes an extra parameter(Op) compared to register_opt decorator. Parameters", "opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\", *tags) return local_opt", "<filename>aesara/gpuarray/optdb.py from aesara.compile import optdb from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer,", "= LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt =", "or local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\", \"gpuarray\",", "add 'fast_run' to these two as this would always enable", "= (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\",", "value from EquilibriumOptimizer by calling the method query. \"\"\" def", "*tags) return local_opt return f def register_inplace(*tags, **kwargs): def f(local_opt):", "an extra parameter(Op) compared to register_opt decorator. Parameters ---------- tracks", "\"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs): def f(local_opt): name = (kwargs and", ": List of Op class Or Op instance or None", "used for an EquilibriumOptimizer. It has the \"tracks\" that we", "optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1, \"gpuarray\", ) pool_db = LocalGroupDB() pool_db2", "def register_opt(*tags, **kwargs): def f(local_opt): name = (kwargs and kwargs.pop(\"name\"))", "name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\",", "(kwargs and kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\", *tags)", "GraphToGPU optimizer. Takes an extra parameter(Op) compared to register_opt decorator.", "String The optimization tag to which the optimizer will be", "on the optimizer flag's value from EquilibriumOptimizer by calling the", "optimizer will be registered. 
\"\"\" def f(local_opt): name = (kwargs", "SequenceDB() # do not add 'fast_run' to these two as", "abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs): def", "Register GPU convolution implementation # They are tried in a", "(kwargs and kwargs.pop(\"name\")) or local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60,", "kwargs.pop(\"name\")) or local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\",", "EquilibriumDB() gpu_seqopt = SequenceDB() # do not add 'fast_run' to", "abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__", "Node's Op to which optimization is being applied. tags :", "LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the", "abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def", "Not used for an EquilibriumOptimizer. It has the \"tracks\" that", "the new GraphToGPU optimizer. Takes an extra parameter(Op) compared to", "def register_opt2(tracks, *tags, **kwargs): \"\"\" Decorator for the new GraphToGPU", "the optimizer will be registered. \"\"\" def f(local_opt): name =", "these two as this would always enable gpuarray mode optdb.register(", "TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\", \"gpuarray\", *tags, ) return local_opt", "def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name,", "and kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\", *tags) return", "take precedence over others. abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\"", "LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)", "from EquilibriumOptimizer by calling the method query. 
\"\"\" def query(self,", "None The Node's Op to which optimization is being applied.", "The optimization tag to which the optimizer will be registered.", "Op class Or Op instance or None The Node's Op", "*tags, ) return local_opt return f # Register GPU convolution", "abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the list", "\"\"\" def query(self, *tags, **kwtags): from aesara.gpuarray.opt import GraphToGPU opt", "failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\", \"gpuarray\", *tags, ) return local_opt return", "\"gpuarray\", ) pool_db = LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ =", "two as this would always enable gpuarray mode optdb.register( \"gpuarray_opt\",", "GPU convolution implementation # They are tried in a specific", "= \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the list local", "the list local optimizers based on the optimizer flag's value", "enable gpuarray mode optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1,", "in a specific order so we can control # which", "= SequenceDB() # do not add 'fast_run' to these two", "Parameters ---------- tracks : List of Op class Or Op", "OptimizationDatabase): opt = local_opt else: opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt,", "= EquilibriumDB() # Not used for an EquilibriumOptimizer. It has", "= LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2 =", "or local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return", "gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return f def", "we can control # which ones take precedence over others.", "*tags) return local_opt return f def register_opt2(tracks, *tags, **kwargs): \"\"\"", "abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt", "the method query. \"\"\" def query(self, *tags, **kwtags): from aesara.gpuarray.opt", "= EquilibriumDB() gpu_cut_copies = EquilibriumDB() # Not used for an", "gpu_seqopt = SequenceDB() # do not add 'fast_run' to these", "gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1, \"gpuarray\", ) pool_db = LocalGroupDB()", "= (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ if isinstance(local_opt, OptimizationDatabase): opt", "\"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs):", "has the \"tracks\" that we need for GraphToGPUDB. 
gpu_optimizer2 =", "from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from aesara.graph.optdb import (", "= local_opt else: opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\",", "import optdb from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from aesara.graph.optdb", "mode optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1, \"gpuarray\", )", "= \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def register_opt(*tags,", "name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt,", "\"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1, \"gpuarray\", ) pool_db =", "local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\", \"gpuarray\", *tags,", "It has the \"tracks\" that we need for GraphToGPUDB. gpu_optimizer2", "from aesara.graph.optdb import ( EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB, ) gpu_optimizer", "based on the optimizer flag's value from EquilibriumOptimizer by calling", "for GraphToGPUDB. gpu_optimizer2 = EquilibriumDB() gpu_seqopt = SequenceDB() # do", ") pool_db = LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\"", "\"tracks\" that we need for GraphToGPUDB. gpu_optimizer2 = EquilibriumDB() gpu_seqopt", "opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return f def register_inplace(*tags,", "LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\" matrix_ops_db = LocalGroupDB()", "opt = local_opt else: opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\",", "that we need for GraphToGPUDB. gpu_optimizer2 = EquilibriumDB() gpu_seqopt =", "we need for GraphToGPUDB. gpu_optimizer2 = EquilibriumDB() gpu_seqopt = SequenceDB()", "local_opt return f def register_opt2(tracks, *tags, **kwargs): \"\"\" Decorator for", "matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__", "\"gpuarray\", *tags) return local_opt return f def register_opt2(tracks, *tags, **kwargs):", "return local_opt return f def register_inplace(*tags, **kwargs): def f(local_opt): name", "\"inplace\", \"gpuarray\", *tags, ) return local_opt return f # Register", "return local_opt return f # Register GPU convolution implementation #", "by calling the method query. 
\"\"\" def query(self, *tags, **kwtags):", ") gpu_optimizer = EquilibriumDB() gpu_cut_copies = EquilibriumDB() # Not used", "gpu_optimizer2 = EquilibriumDB() gpu_seqopt = SequenceDB() # do not add", "optdb from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from aesara.graph.optdb import", "from aesara.compile import optdb from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer", "query(self, *tags, **kwtags): from aesara.gpuarray.opt import GraphToGPU opt = gpu_optimizer2.query(*tags,", "do not add 'fast_run' to these two as this would", "aesara.gpuarray.opt import GraphToGPU opt = gpu_optimizer2.query(*tags, **kwtags) return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)", "f # Register GPU convolution implementation # They are tried", "49.5) - 1, \"gpuarray\", ) pool_db = LocalGroupDB() pool_db2 =", "as this would always enable gpuarray mode optdb.register( \"gpuarray_opt\", gpu_seqopt,", "is being applied. tags : String The optimization tag to", "can control # which ones take precedence over others. abstractconv_groupopt", "Decorator for the new GraphToGPU optimizer. Takes an extra parameter(Op)", "aesara.compile import optdb from aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from", "LocalGroupDB, OptimizationDatabase, SequenceDB, ) gpu_optimizer = EquilibriumDB() gpu_cut_copies = EquilibriumDB()", "= \"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ =", "Op to which optimization is being applied. tags : String", "being applied. tags : String The optimization tag to which", "\"fast_run\", \"gpuarray\", *tags) return local_opt return f def register_inplace(*tags, **kwargs):", "# which ones take precedence over others. abstractconv_groupopt = LocalGroupDB()", "flag's value from EquilibriumOptimizer by calling the method query. \"\"\"", "the optimizer flag's value from EquilibriumOptimizer by calling the method", "optimization tag to which the optimizer will be registered. \"\"\"", "local optimizers based on the optimizer flag's value from EquilibriumOptimizer", "specific order so we can control # which ones take", "be registered. \"\"\" def f(local_opt): name = (kwargs and kwargs.pop(\"name\"))", "abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs): def f(local_opt): name =", "implementation # They are tried in a specific order so", "new GraphToGPU optimizer. Takes an extra parameter(Op) compared to register_opt", "which ones take precedence over others. abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__", "if isinstance(local_opt, OptimizationDatabase): opt = local_opt else: opt = local_optimizer(tracks)(local_opt)", "OptimizationDatabase, SequenceDB, ) gpu_optimizer = EquilibriumDB() gpu_cut_copies = EquilibriumDB() #", "LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs): def f(local_opt): name", "class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the list local optimizers based on", "60, \"fast_run\", \"inplace\", \"gpuarray\", *tags, ) return local_opt return f", "control # which ones take precedence over others. 
abstractconv_groupopt =", "local_opt else: opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\", *tags)", "name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\", \"inplace\", \"gpuarray\", *tags, ) return", "---------- tracks : List of Op class Or Op instance", "register_inplace(*tags, **kwargs): def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or", "a specific order so we can control # which ones", "def register_inplace(*tags, **kwargs): def f(local_opt): name = (kwargs and kwargs.pop(\"name\"))", "import ( EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB, ) gpu_optimizer = EquilibriumDB()", "def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ if", "else: opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\", *tags) return", "register_opt decorator. Parameters ---------- tracks : List of Op class", "return f def register_inplace(*tags, **kwargs): def f(local_opt): name = (kwargs", "register_opt(*tags, **kwargs): def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or", "1, \"gpuarray\", ) pool_db = LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__", "f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ if isinstance(local_opt,", "and kwargs.pop(\"name\")) or local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, \"fast_run\",", "order so we can control # which ones take precedence", "local_opt return f def register_inplace(*tags, **kwargs): def f(local_opt): name =", "optimization is being applied. tags : String The optimization tag", "kwargs.pop(\"name\")) or local_opt.__name__ if isinstance(local_opt, OptimizationDatabase): opt = local_opt else:", "list local optimizers based on the optimizer flag's value from", "Retrieves the list local optimizers based on the optimizer flag's", "**kwargs): \"\"\" Decorator for the new GraphToGPU optimizer. Takes an", "\"fast_run\", \"inplace\", \"gpuarray\", *tags, ) return local_opt return f #", "f def register_opt2(tracks, *tags, **kwargs): \"\"\" Decorator for the new", "or local_opt.__name__ if isinstance(local_opt, OptimizationDatabase): opt = local_opt else: opt", "applied. tags : String The optimization tag to which the", "and kwargs.pop(\"name\")) or local_opt.__name__ if isinstance(local_opt, OptimizationDatabase): opt = local_opt", "so we can control # which ones take precedence over", "return f # Register GPU convolution implementation # They are", "EquilibriumDB() gpu_cut_copies = EquilibriumDB() # Not used for an EquilibriumOptimizer.", "class Or Op instance or None The Node's Op to", "register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the list local optimizers based", "matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB() abstract_batch_norm_db2", "tags : String The optimization tag to which the optimizer", "= LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\" matrix_ops_db =", "# Not used for an EquilibriumOptimizer. It has the \"tracks\"", "# Register GPU convolution implementation # They are tried in", "tried in a specific order so we can control #", "to which optimization is being applied. 
tags : String The", "# do not add 'fast_run' to these two as this", "\"\"\" Retrieves the list local optimizers based on the optimizer", "= LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db =", "LocalGroupDB() abstract_batch_norm_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB()", "an EquilibriumOptimizer. It has the \"tracks\" that we need for", "aesara.graph.opt import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from aesara.graph.optdb import ( EquilibriumDB,", "Or Op instance or None The Node's Op to which", "= LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs): def f(local_opt):", "from aesara.gpuarray.opt import GraphToGPU opt = gpu_optimizer2.query(*tags, **kwtags) return GraphToGPU(opt.local_optimizers_all,", "gpu_optimizer = EquilibriumDB() gpu_cut_copies = EquilibriumDB() # Not used for", "would always enable gpuarray mode optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5)", "= \"gpuarray_batchnorm_opts\" def register_opt(*tags, **kwargs): def f(local_opt): name = (kwargs", "return f def register_opt2(tracks, *tags, **kwargs): \"\"\" Decorator for the", "= LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__ =", "local_opt.__name__ if isinstance(local_opt, OptimizationDatabase): opt = local_opt else: opt =", "def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ optdb.register(", "- 1, \"gpuarray\", ) pool_db = LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)", "query. \"\"\" def query(self, *tags, **kwtags): from aesara.gpuarray.opt import GraphToGPU", "for an EquilibriumOptimizer. It has the \"tracks\" that we need", "local_opt return f # Register GPU convolution implementation # They", "import GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from aesara.graph.optdb import ( EquilibriumDB, LocalGroupDB,", "matrix_ops_db = LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db", "\"\"\" def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__", "SequenceDB, ) gpu_optimizer = EquilibriumDB() gpu_cut_copies = EquilibriumDB() # Not", "EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB, ) gpu_optimizer = EquilibriumDB() gpu_cut_copies =", "precedence over others. abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt)", "(kwargs and kwargs.pop(\"name\")) or local_opt.__name__ if isinstance(local_opt, OptimizationDatabase): opt =", "convolution implementation # They are tried in a specific order", "to register_opt decorator. 
Parameters ---------- tracks : List of Op", "pool_db2.__name__ = \"pool_db2\" matrix_ops_db = LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__", "pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\" matrix_ops_db = LocalGroupDB() matrix_ops_db2", "= \"pool_db2\" matrix_ops_db = LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ =", "LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\" abstract_batch_norm_db = LocalGroupDB()", "tag to which the optimizer will be registered. \"\"\" def", "which optimization is being applied. tags : String The optimization", ") return local_opt return f # Register GPU convolution implementation", "def query(self, *tags, **kwtags): from aesara.gpuarray.opt import GraphToGPU opt =", "abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\"", "ones take precedence over others. abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__ =", "GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves the list local optimizers based on the", "# They are tried in a specific order so we", "EquilibriumOptimizer. It has the \"tracks\" that we need for GraphToGPUDB.", "aesara.graph.optdb import ( EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB, ) gpu_optimizer =", "need for GraphToGPUDB. gpu_optimizer2 = EquilibriumDB() gpu_seqopt = SequenceDB() #", ": String The optimization tag to which the optimizer will", "which the optimizer will be registered. \"\"\" def f(local_opt): name", "the \"tracks\" that we need for GraphToGPUDB. gpu_optimizer2 = EquilibriumDB()", "always enable gpuarray mode optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) -", "Takes an extra parameter(Op) compared to register_opt decorator. Parameters ----------", "GraphToGPUDB. gpu_optimizer2 = EquilibriumDB() gpu_seqopt = SequenceDB() # do not", "= LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase): \"\"\" Retrieves", "over others. abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class", "local_optimizer from aesara.graph.optdb import ( EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB, )", "not add 'fast_run' to these two as this would always", "others. abstractconv_groupopt = LocalGroupDB() abstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\" register_opt(\"fast_compile\")(abstractconv_groupopt) class GraphToGPUDB(OptimizationDatabase):", "optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1, \"gpuarray\", ) pool_db", "compared to register_opt decorator. Parameters ---------- tracks : List of", "pool_db = LocalGroupDB() pool_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\" matrix_ops_db", "will be registered. 
\"\"\" def f(local_opt): name = (kwargs and", "\"gpuarray\", *tags) return local_opt return f def register_inplace(*tags, **kwargs): def", "local_opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return f def register_opt2(tracks,", "*tags, **kwargs): \"\"\" Decorator for the new GraphToGPU optimizer. Takes", "optimizers based on the optimizer flag's value from EquilibriumOptimizer by", "instance or None The Node's Op to which optimization is", "of Op class Or Op instance or None The Node's", "**kwtags): from aesara.gpuarray.opt import GraphToGPU opt = gpu_optimizer2.query(*tags, **kwtags) return", "( EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB, ) gpu_optimizer = EquilibriumDB() gpu_cut_copies", "gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return f def", "GraphToGPULocalOptGroup, TopoOptimizer, local_optimizer from aesara.graph.optdb import ( EquilibriumDB, LocalGroupDB, OptimizationDatabase,", "register_opt2(tracks, *tags, **kwargs): \"\"\" Decorator for the new GraphToGPU optimizer.", "local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return f", "gpu_cut_copies = EquilibriumDB() # Not used for an EquilibriumOptimizer. It", "LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\" matrix_ops_db = LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup)", "optimizer flag's value from EquilibriumOptimizer by calling the method query.", "gpuarray mode optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\", 49.5) - 1, \"gpuarray\",", "TopoOptimizer, local_optimizer from aesara.graph.optdb import ( EquilibriumDB, LocalGroupDB, OptimizationDatabase, SequenceDB,", "\"fast_run\", \"gpuarray\", *tags) return local_opt return f def register_opt2(tracks, *tags,", "to these two as this would always enable gpuarray mode", "or None The Node's Op to which optimization is being", "The Node's Op to which optimization is being applied. tags", "calling the method query. \"\"\" def query(self, *tags, **kwtags): from", "List of Op class Or Op instance or None The", "\"\"\" Decorator for the new GraphToGPU optimizer. Takes an extra", "parameter(Op) compared to register_opt decorator. Parameters ---------- tracks : List", "registered. \"\"\" def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or", "'fast_run' to these two as this would always enable gpuarray", "kwargs.pop(\"name\")) or local_opt.__name__ gpu_optimizer.register(name, local_opt, \"fast_run\", \"gpuarray\", *tags) return local_opt", "for the new GraphToGPU optimizer. 
Takes an extra parameter(Op) compared", "f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ optdb.register( name,", "isinstance(local_opt, OptimizationDatabase): opt = local_opt else: opt = local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name,", "are tried in a specific order so we can control", "return local_opt return f def register_opt2(tracks, *tags, **kwargs): \"\"\" Decorator", "\"pool_db2\" matrix_ops_db = LocalGroupDB() matrix_ops_db2 = LocalGroupDB(local_opt=GraphToGPULocalOptGroup) matrix_ops_db2.__name__ = \"matrix_ops_db2\"", "this would always enable gpuarray mode optdb.register( \"gpuarray_opt\", gpu_seqopt, optdb.__position__.get(\"add_destroy_handler\",", "**kwargs): def f(local_opt): name = (kwargs and kwargs.pop(\"name\")) or local_opt.__name__", "= LocalGroupDB(local_opt=GraphToGPULocalOptGroup) pool_db2.__name__ = \"pool_db2\" matrix_ops_db = LocalGroupDB() matrix_ops_db2 =", "Op instance or None The Node's Op to which optimization", "local_optimizer(tracks)(local_opt) gpu_optimizer2.register(name, opt, \"fast_run\", \"gpuarray\", *tags) return local_opt return f", "\"gpuarray\", *tags, ) return local_opt return f # Register GPU", "to which the optimizer will be registered. \"\"\" def f(local_opt):", "method query. \"\"\" def query(self, *tags, **kwtags): from aesara.gpuarray.opt import", "tracks : List of Op class Or Op instance or", "LocalGroupDB(local_opt=GraphToGPULocalOptGroup) abstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\" abstract_batch_norm_groupopt = LocalGroupDB() abstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\"", "f def register_inplace(*tags, **kwargs): def f(local_opt): name = (kwargs and", "= (kwargs and kwargs.pop(\"name\")) or local_opt.__name__ optdb.register( name, TopoOptimizer(local_opt, failure_callback=TopoOptimizer.warn_inplace)," ]
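The file registers abstractconv_groupopt through register_opt above, and the same pattern works for any further group of local optimizers. A minimal sketch repeating that pattern; the database variable and its name are assumptions, not part of the file:

# Hypothetical: a new group database registered exactly the way the file
# registers abstractconv_groupopt.
example_groupopt = LocalGroupDB()
example_groupopt.__name__ = "gpuarray_example_opts"  # name is an assumption
register_opt("fast_compile")(example_groupopt)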
[ "urlquote log = logging.getLogger(__name__) class Node(JenkinsBase): \"\"\" Class to hold", "of the node :param jenkins_obj: ref to the jenkins obj", "are taking this node offline \"\"\" if not self._data['offline']: self.toggle_temporarily_offline(message)", "self.jenkins def __str__(self): return self.name def is_online(self): return not self.poll(tree='offline')['offline']", "state is still online:\" + \"offline = %s , temporarilyOffline", "\"\"\" initial_state = self.is_temporarily_offline() url = self.baseurl + \\ \"/toggleOffline?offlineMessage=\"", "node object by providing all relevant pointers to it :param", "not self._data['offline']: self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline') if not data['offline']: raise", "node (online/offline) and set 'temporarilyOffline' property (True/False) Calling the same", "information on nodes that are attached as slaves to the", "to the jenkins obj :return: Node obj \"\"\" self.name =", "you are taking this node offline \"\"\" if not self._data['offline']:", "is not set - client has connection problems and AssertionError", "nodename self.jenkins = jenkins_obj JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self): return self.jenkins", "self.poll() log.debug(html_result) state = self.is_temporarily_offline() if initial_state == state: raise", "self._data['temporarilyOffline'])) elif self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']: raise AssertionError(\"The", "jenkins_obj: ref to the jenkins obj :return: Node obj \"\"\"", "jenkinsapi\"): \"\"\" Set node offline. If after run node state", "string can be used to explain why you are taking", "self.is_temporarily_offline() url = self.baseurl + \\ \"/toggleOffline?offlineMessage=\" + urlquote(message) try:", "(online/offline) and set 'temporarilyOffline' property (True/False) Calling the same method", "try: from urllib import quote as urlquote except ImportError: #", "Before change state verify client state: if node set 'offline'", "check client \" \"connection: offline = %s, \" \"temporarilyOffline =", "urllib import quote as urlquote except ImportError: # Python3 from", "logging.getLogger(__name__) class Node(JenkinsBase): \"\"\" Class to hold information on nodes", "+ \"offline = %s , temporarilyOffline = %s\" % (data['offline'],", "connection problems and AssertionError raised. If after run node state", "AssertionError. : param message: optional string explain why you are", "client connection:\" \" offline = %s, \" \"temporarilyOffline = %s\"", "method again will bring node status back. :param message: optional", "be used to explain why you are taking this node", "if not data['offline']: raise AssertionError(\"The node state is still online:\"", "node state has not been changed raise AssertionError. 
: param", "AssertionError(\"The node state is still online:\" + \"offline = %s", "ImportError: # Python3 from urllib.parse import quote as urlquote log", "why you are taking this node offline \"\"\" initial_state =", "== state: raise AssertionError( \"The node state has not changed:", "= logging.getLogger(__name__) class Node(JenkinsBase): \"\"\" Class to hold information on", "return self.jenkins def __str__(self): return self.name def is_online(self): return not", "online:\" + \"offline = %s , temporarilyOffline = %s\" %", "self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline') if not data['offline']: raise AssertionError(\"The node", "is_jnlpagent(self): return self._data['jnlpAgent'] def is_idle(self): return self._data['idle'] def set_online(self): \"\"\"", "% (data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Switches", "%s, \" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline']", "node online. Before change state verify client state: if node", "= self.poll(tree='offline,temporarilyOffline') if not data['offline']: raise AssertionError(\"The node state is", "all relevant pointers to it :param baseurl: basic url for", "from jenkinsapi\"): \"\"\" Switches state of connected node (online/offline) and", "if initial_state == state: raise AssertionError( \"The node state has", "not set - client has connection problems and AssertionError raised.", "quote as urlquote except ImportError: # Python3 from urllib.parse import", "offline and not marked as \" \"temporarilyOffline, check client \"", "run node state has not been changed raise AssertionError. \"\"\"", "offline \"\"\" if not self._data['offline']: self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline') if", "%s, \" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self,", "node status back. :param message: optional string can be used", "message=\"requested from jenkinsapi\"): \"\"\" Switches state of connected node (online/offline)", ":param message: optional string can be used to explain why", "taking this node offline \"\"\" initial_state = self.is_temporarily_offline() url =", "state: if node set 'offline' but 'temporarilyOffline' is not set", "raise AssertionError. : param message: optional string explain why you", "but 'temporarilyOffline' is not set - client has connection problems", "%s , temporarilyOffline = %s\" % (data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self,", "the node :param jenkins_obj: ref to the jenkins obj :return:", "(data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Switches state", "def __str__(self): return self.name def is_online(self): return not self.poll(tree='offline')['offline'] def", "def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return self._data['jnlpAgent'] def is_idle(self):", ":param baseurl: basic url for querying information on a node", "Set node offline. If after run node state has not", "changed raise AssertionError. 
\"\"\" self.poll() # Before change state check", "self.toggle_temporarily_offline() if self._data['offline']: raise AssertionError(\"The node state is still offline,", "node offline \"\"\" initial_state = self.is_temporarily_offline() url = self.baseurl +", "node state has not been changed raise AssertionError. \"\"\" self.poll()", "raise AssertionError. \"\"\" self.poll() # Before change state check if", "\"\"\" Init a node object by providing all relevant pointers", "AssertionError. \"\"\" self.poll() # Before change state check if client", "change state check if client is connected if self._data['offline'] and", "JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self): return self.jenkins def __str__(self): return self.name", "node state is still online:\" + \"offline = %s ,", "self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return self._data['jnlpAgent'] def", "instance \"\"\" def __init__(self, baseurl, nodename, jenkins_obj): \"\"\" Init a", "'temporarilyOffline' property (True/False) Calling the same method again will bring", "def is_jnlpagent(self): return self._data['jnlpAgent'] def is_idle(self): return self._data['idle'] def set_online(self):", "relevant pointers to it :param baseurl: basic url for querying", "to the master jenkins instance \"\"\" def __init__(self, baseurl, nodename,", "is still offline, \" \"check client connection:\" \" offline =", "data={}) self.poll() log.debug(html_result) state = self.is_temporarily_offline() if initial_state == state:", "object by providing all relevant pointers to it :param baseurl:", "not self._data['temporarilyOffline']: raise AssertionError(\"Node is offline and not marked as", ": param message: optional string explain why you are taking", "jenkins obj :return: Node obj \"\"\" self.name = nodename self.jenkins", "state = self.is_temporarily_offline() if initial_state == state: raise AssertionError( \"The", "is_idle(self): return self._data['idle'] def set_online(self): \"\"\" Set node online. Before", "has connection problems and AssertionError raised. If after run node", "def get_jenkins_obj(self): return self.jenkins def __str__(self): return self.name def is_online(self):", "node offline. 
If after run node state has not been", "explain why you are taking this node offline \"\"\" if", "is offline and not marked as \" \"temporarilyOffline, check client", "that are attached as slaves to the master jenkins instance", "to explain why you are taking this node offline \"\"\"", "self.name = nodename self.jenkins = jenkins_obj JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self):", "except PostRequired: html_result = self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll() log.debug(html_result) state", "baseurl) def get_jenkins_obj(self): return self.jenkins def __str__(self): return self.name def", "is still online:\" + \"offline = %s , temporarilyOffline =", "Switches state of connected node (online/offline) and set 'temporarilyOffline' property", ":param nodename: hostname of the node :param jenkins_obj: ref to", "baseurl: basic url for querying information on a node :param", "connected node (online/offline) and set 'temporarilyOffline' property (True/False) Calling the", "explain why you are taking this node offline \"\"\" initial_state", "connection:\" \" offline = %s, \" \"temporarilyOffline = %s\" %", "initial_state == state: raise AssertionError( \"The node state has not", "and not marked as \" \"temporarilyOffline, check client \" \"connection:", "change state verify client state: if node set 'offline' but", "is_online(self): return not self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self):", "hold information on nodes that are attached as slaves to", "taking this node offline \"\"\" if not self._data['offline']: self.toggle_temporarily_offline(message) data", "self._data['offline'] and not self._data['temporarilyOffline']: raise AssertionError(\"Node is offline and not", "self._data['temporarilyOffline'])) def set_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Set node offline.", "node :param jenkins_obj: ref to the jenkins obj :return: Node", "property (True/False) Calling the same method again will bring node", "state check if client is connected if self._data['offline'] and not", "it :param baseurl: basic url for querying information on a", "= nodename self.jenkins = jenkins_obj JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self): return", "return self._data['jnlpAgent'] def is_idle(self): return self._data['idle'] def set_online(self): \"\"\" Set", "= self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result = self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll()", "can be used to explain why you are taking this", "raised. If after run node state has not been changed", "log.debug(html_result) state = self.is_temporarily_offline() if initial_state == state: raise AssertionError(", "import quote as urlquote except ImportError: # Python3 from urllib.parse", "state: raise AssertionError( \"The node state has not changed: temporarilyOffline", "self.baseurl + \\ \"/toggleOffline?offlineMessage=\" + urlquote(message) try: html_result = self.jenkins.requester.get_and_confirm_status(url)", "optional string explain why you are taking this node offline", "of connected node (online/offline) and set 'temporarilyOffline' property (True/False) Calling", "you are taking this node offline \"\"\" initial_state = self.is_temporarily_offline()", "not been changed raise AssertionError. 
: param message: optional string", "PostRequired import logging try: from urllib import quote as urlquote", "state is still offline, \" \"check client connection:\" \" offline", "__str__(self): return self.name def is_online(self): return not self.poll(tree='offline')['offline'] def is_temporarily_offline(self):", "= %s\" % (self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline()", "urllib.parse import quote as urlquote log = logging.getLogger(__name__) class Node(JenkinsBase):", "data['offline']: raise AssertionError(\"The node state is still online:\" + \"offline", "return self._data['idle'] def set_online(self): \"\"\" Set node online. Before change", "state of connected node (online/offline) and set 'temporarilyOffline' property (True/False)", "raise AssertionError(\"Node is offline and not marked as \" \"temporarilyOffline,", "\" offline = %s, \" \"temporarilyOffline = %s\" % (self._data['offline'],", "raise AssertionError(\"The node state is still offline, \" \"check client", "and AssertionError raised. If after run node state has not", "been changed raise AssertionError. \"\"\" self.poll() # Before change state", "if self._data['offline'] and not self._data['temporarilyOffline']: raise AssertionError(\"Node is offline and", "<reponame>imsardine/jenkinsapi<gh_stars>0 \"\"\" Module for jenkinsapi Node class \"\"\" from jenkinsapi.jenkinsbase", "is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return self._data['jnlpAgent'] def is_idle(self): return", "import quote as urlquote log = logging.getLogger(__name__) class Node(JenkinsBase): \"\"\"", "= self.baseurl + \\ \"/toggleOffline?offlineMessage=\" + urlquote(message) try: html_result =", "\" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline'] and", "\"\"\" Set node online. Before change state verify client state:", "if not self._data['offline']: self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline') if not data['offline']:", "data = self.poll(tree='offline,temporarilyOffline') if not data['offline']: raise AssertionError(\"The node state", "bring node status back. :param message: optional string can be", "html_result = self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result = self.jenkins.requester.post_and_confirm_status( url, data={})", "status back. 
:param message: optional string can be used to", "check if client is connected if self._data['offline'] and not self._data['temporarilyOffline']:", "jenkins instance \"\"\" def __init__(self, baseurl, nodename, jenkins_obj): \"\"\" Init", "basic url for querying information on a node :param nodename:", "self.poll(tree='offline,temporarilyOffline') if not data['offline']: raise AssertionError(\"The node state is still", "Node class \"\"\" from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.custom_exceptions import", "node state has not changed: temporarilyOffline = %s\" % state)", "return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return self._data['jnlpAgent'] def is_idle(self): return self._data['idle']", "providing all relevant pointers to it :param baseurl: basic url", "nodename: hostname of the node :param jenkins_obj: ref to the", "set 'temporarilyOffline' property (True/False) Calling the same method again will", "except ImportError: # Python3 from urllib.parse import quote as urlquote", "AssertionError(\"Node is offline and not marked as \" \"temporarilyOffline, check", "hostname of the node :param jenkins_obj: ref to the jenkins", "= self.is_temporarily_offline() if initial_state == state: raise AssertionError( \"The node", "self._data['idle'] def set_online(self): \"\"\" Set node online. Before change state", "toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Switches state of connected node", "set_online(self): \"\"\" Set node online. Before change state verify client", "still online:\" + \"offline = %s , temporarilyOffline = %s\"", "Module for jenkinsapi Node class \"\"\" from jenkinsapi.jenkinsbase import JenkinsBase", "AssertionError(\"The node state is still offline, \" \"check client connection:\"", "self._data['offline']: raise AssertionError(\"The node state is still offline, \" \"check", "logging try: from urllib import quote as urlquote except ImportError:", "self.poll() # Before change state check if client is connected", "as slaves to the master jenkins instance \"\"\" def __init__(self,", "Node(JenkinsBase): \"\"\" Class to hold information on nodes that are", "def __init__(self, baseurl, nodename, jenkins_obj): \"\"\" Init a node object", "\"check client connection:\" \" offline = %s, \" \"temporarilyOffline =", "string explain why you are taking this node offline \"\"\"", "optional string can be used to explain why you are", "def toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Switches state of connected", "self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll() log.debug(html_result) state = self.is_temporarily_offline() if initial_state", "initial_state = self.is_temporarily_offline() url = self.baseurl + \\ \"/toggleOffline?offlineMessage=\" +", "again will bring node status back. :param message: optional string", "same method again will bring node status back. 
:param message:", "not marked as \" \"temporarilyOffline, check client \" \"connection: offline", "connected if self._data['offline'] and not self._data['temporarilyOffline']: raise AssertionError(\"Node is offline", "Class to hold information on nodes that are attached as", "= %s, \" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) def", "%s\" % (self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if", "% (self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Set", "client has connection problems and AssertionError raised. If after run", ", temporarilyOffline = %s\" % (data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested", "from urllib.parse import quote as urlquote log = logging.getLogger(__name__) class", "url = self.baseurl + \\ \"/toggleOffline?offlineMessage=\" + urlquote(message) try: html_result", "on nodes that are attached as slaves to the master", "+ urlquote(message) try: html_result = self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result =", "not been changed raise AssertionError. \"\"\" self.poll() # Before change", "\"\"\" from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.custom_exceptions import PostRequired import", "(True/False) Calling the same method again will bring node status", "node set 'offline' but 'temporarilyOffline' is not set - client", "obj \"\"\" self.name = nodename self.jenkins = jenkins_obj JenkinsBase.__init__(self, baseurl)", "import logging try: from urllib import quote as urlquote except", "set - client has connection problems and AssertionError raised. If", "= %s , temporarilyOffline = %s\" % (data['offline'], data['temporarilyOffline'])) def", "= self.is_temporarily_offline() url = self.baseurl + \\ \"/toggleOffline?offlineMessage=\" + urlquote(message)", "param message: optional string explain why you are taking this", "the jenkins obj :return: Node obj \"\"\" self.name = nodename", "offline \"\"\" initial_state = self.is_temporarily_offline() url = self.baseurl + \\", "\"\"\" Module for jenkinsapi Node class \"\"\" from jenkinsapi.jenkinsbase import", "has not been changed raise AssertionError. \"\"\" self.poll() # Before", "jenkins_obj): \"\"\" Init a node object by providing all relevant", "run node state has not been changed raise AssertionError. 
:", "\"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline'] and self._data['temporarilyOffline']:", "import JenkinsBase from jenkinsapi.custom_exceptions import PostRequired import logging try: from", "from jenkinsapi.custom_exceptions import PostRequired import logging try: from urllib import", "# Before change state check if client is connected if", "slaves to the master jenkins instance \"\"\" def __init__(self, baseurl,", "\"offline = %s , temporarilyOffline = %s\" % (data['offline'], data['temporarilyOffline']))", "obj :return: Node obj \"\"\" self.name = nodename self.jenkins =", "the master jenkins instance \"\"\" def __init__(self, baseurl, nodename, jenkins_obj):", "on a node :param nodename: hostname of the node :param", "\\ \"/toggleOffline?offlineMessage=\" + urlquote(message) try: html_result = self.jenkins.requester.get_and_confirm_status(url) except PostRequired:", "querying information on a node :param nodename: hostname of the", "message=\"requested from jenkinsapi\"): \"\"\" Set node offline. If after run", "If after run node state has not been changed raise", "state has not been changed raise AssertionError. : param message:", "still offline, \" \"check client connection:\" \" offline = %s,", "used to explain why you are taking this node offline", "AssertionError( \"The node state has not changed: temporarilyOffline = %s\"", "urlquote(message) try: html_result = self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result = self.jenkins.requester.post_and_confirm_status(", "Python3 from urllib.parse import quote as urlquote log = logging.getLogger(__name__)", "a node :param nodename: hostname of the node :param jenkins_obj:", "self._data['jnlpAgent'] def is_idle(self): return self._data['idle'] def set_online(self): \"\"\" Set node", "AssertionError raised. If after run node state has not been", "\"\"\" self.poll() # Before change state check if client is", "return self.name def is_online(self): return not self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return", "% (self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']:", "from jenkinsapi\"): \"\"\" Set node offline. If after run node", "\"\"\" if not self._data['offline']: self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline') if not", "the same method again will bring node status back. 
:param", ":param jenkins_obj: ref to the jenkins obj :return: Node obj", "marked as \" \"temporarilyOffline, check client \" \"connection: offline =", "data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Switches state of", "= %s\" % (self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self, message=\"requested from jenkinsapi\"):", "are taking this node offline \"\"\" initial_state = self.is_temporarily_offline() url", "= jenkins_obj JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self): return self.jenkins def __str__(self):", "client is connected if self._data['offline'] and not self._data['temporarilyOffline']: raise AssertionError(\"Node", "this node offline \"\"\" if not self._data['offline']: self.toggle_temporarily_offline(message) data =", "url for querying information on a node :param nodename: hostname", "quote as urlquote log = logging.getLogger(__name__) class Node(JenkinsBase): \"\"\" Class", "to hold information on nodes that are attached as slaves", "offline = %s, \" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline']))", "attached as slaves to the master jenkins instance \"\"\" def", "problems and AssertionError raised. If after run node state has", "- client has connection problems and AssertionError raised. If after", "Before change state check if client is connected if self._data['offline']", "\" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self, message=\"requested", "try: html_result = self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result = self.jenkins.requester.post_and_confirm_status( url,", "node offline \"\"\" if not self._data['offline']: self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline')", "import PostRequired import logging try: from urllib import quote as", "node state is still offline, \" \"check client connection:\" \"", "\"\"\" Class to hold information on nodes that are attached", "self.is_temporarily_offline() if initial_state == state: raise AssertionError( \"The node state", "why you are taking this node offline \"\"\" if not", "state has not been changed raise AssertionError. 
\"\"\" self.poll() #", "%s\" % (data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"): \"\"\"", "'temporarilyOffline' is not set - client has connection problems and", "from urllib import quote as urlquote except ImportError: # Python3", "elif self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']: raise AssertionError(\"The node", "nodes that are attached as slaves to the master jenkins", "return not self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return", "are attached as slaves to the master jenkins instance \"\"\"", "if node set 'offline' but 'temporarilyOffline' is not set -", "(self._data['offline'], self._data['temporarilyOffline'])) elif self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']: raise", "\"\"\" self.name = nodename self.jenkins = jenkins_obj JenkinsBase.__init__(self, baseurl) def", "state verify client state: if node set 'offline' but 'temporarilyOffline'", "%s\" % (self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self, message=\"requested from jenkinsapi\"): \"\"\"", "client state: if node set 'offline' but 'temporarilyOffline' is not", "html_result = self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll() log.debug(html_result) state = self.is_temporarily_offline()", "= %s\" % (data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested from jenkinsapi\"):", "master jenkins instance \"\"\" def __init__(self, baseurl, nodename, jenkins_obj): \"\"\"", "as urlquote except ImportError: # Python3 from urllib.parse import quote", "by providing all relevant pointers to it :param baseurl: basic", "\" \"temporarilyOffline, check client \" \"connection: offline = %s, \"", "def set_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Set node offline. If", "node :param nodename: hostname of the node :param jenkins_obj: ref", "def set_online(self): \"\"\" Set node online. Before change state verify", "verify client state: if node set 'offline' but 'temporarilyOffline' is", "to it :param baseurl: basic url for querying information on", "a node object by providing all relevant pointers to it", "will bring node status back. :param message: optional string can", "for jenkinsapi Node class \"\"\" from jenkinsapi.jenkinsbase import JenkinsBase from", "(self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Set node", "jenkinsapi Node class \"\"\" from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.custom_exceptions", "from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.custom_exceptions import PostRequired import logging", "= %s, \" \"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) elif", "self._data['offline']: self.toggle_temporarily_offline(message) data = self.poll(tree='offline,temporarilyOffline') if not data['offline']: raise AssertionError(\"The", "been changed raise AssertionError. 
: param message: optional string explain", "and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']: raise AssertionError(\"The node state is", "is connected if self._data['offline'] and not self._data['temporarilyOffline']: raise AssertionError(\"Node is", "jenkinsapi.custom_exceptions import PostRequired import logging try: from urllib import quote", "set 'offline' but 'temporarilyOffline' is not set - client has", "# Python3 from urllib.parse import quote as urlquote log =", "self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return self._data['jnlpAgent'] def is_idle(self): return self._data['idle'] def", "\" \"check client connection:\" \" offline = %s, \" \"temporarilyOffline", "raise AssertionError(\"The node state is still online:\" + \"offline =", "information on a node :param nodename: hostname of the node", "get_jenkins_obj(self): return self.jenkins def __str__(self): return self.name def is_online(self): return", "def is_idle(self): return self._data['idle'] def set_online(self): \"\"\" Set node online.", "def is_online(self): return not self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def", "offline. If after run node state has not been changed", "not self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline'] def is_jnlpagent(self): return self._data['jnlpAgent']", "\"The node state has not changed: temporarilyOffline = %s\" %", "online. Before change state verify client state: if node set", "__init__(self, baseurl, nodename, jenkins_obj): \"\"\" Init a node object by", "self.jenkins = jenkins_obj JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self): return self.jenkins def", "after run node state has not been changed raise AssertionError.", "as \" \"temporarilyOffline, check client \" \"connection: offline = %s,", "jenkinsapi\"): \"\"\" Switches state of connected node (online/offline) and set", "message: optional string explain why you are taking this node", "self._data['offline'] and self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']: raise AssertionError(\"The node state", "for querying information on a node :param nodename: hostname of", "\"\"\" Set node offline. If after run node state has", "if self._data['offline']: raise AssertionError(\"The node state is still offline, \"", "self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result = self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll() log.debug(html_result)", "client \" \"connection: offline = %s, \" \"temporarilyOffline = %s\"", "not data['offline']: raise AssertionError(\"The node state is still online:\" +", "\"temporarilyOffline, check client \" \"connection: offline = %s, \" \"temporarilyOffline", "\"\"\" Switches state of connected node (online/offline) and set 'temporarilyOffline'", "Node obj \"\"\" self.name = nodename self.jenkins = jenkins_obj JenkinsBase.__init__(self,", ":return: Node obj \"\"\" self.name = nodename self.jenkins = jenkins_obj", "jenkins_obj JenkinsBase.__init__(self, baseurl) def get_jenkins_obj(self): return self.jenkins def __str__(self): return", "message: optional string can be used to explain why you", "ref to the jenkins obj :return: Node obj \"\"\" self.name", "Set node online. 
Before change state verify client state: if", "Calling the same method again will bring node status back.", "raise AssertionError( \"The node state has not changed: temporarilyOffline =", "self._data['temporarilyOffline']: self.toggle_temporarily_offline() if self._data['offline']: raise AssertionError(\"The node state is still", "back. :param message: optional string can be used to explain", "and set 'temporarilyOffline' property (True/False) Calling the same method again", "self._data['temporarilyOffline']: raise AssertionError(\"Node is offline and not marked as \"", "\"/toggleOffline?offlineMessage=\" + urlquote(message) try: html_result = self.jenkins.requester.get_and_confirm_status(url) except PostRequired: html_result", "class \"\"\" from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.custom_exceptions import PostRequired", "\"connection: offline = %s, \" \"temporarilyOffline = %s\" % (self._data['offline'],", "and not self._data['temporarilyOffline']: raise AssertionError(\"Node is offline and not marked", "nodename, jenkins_obj): \"\"\" Init a node object by providing all", "set_offline(self, message=\"requested from jenkinsapi\"): \"\"\" Set node offline. If after", "JenkinsBase from jenkinsapi.custom_exceptions import PostRequired import logging try: from urllib", "self.name def is_online(self): return not self.poll(tree='offline')['offline'] def is_temporarily_offline(self): return self.poll(tree='temporarilyOffline')['temporarilyOffline']", "'offline' but 'temporarilyOffline' is not set - client has connection", "baseurl, nodename, jenkins_obj): \"\"\" Init a node object by providing", "+ \\ \"/toggleOffline?offlineMessage=\" + urlquote(message) try: html_result = self.jenkins.requester.get_and_confirm_status(url) except", "changed raise AssertionError. : param message: optional string explain why", "jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.custom_exceptions import PostRequired import logging try:", "as urlquote log = logging.getLogger(__name__) class Node(JenkinsBase): \"\"\" Class to", "pointers to it :param baseurl: basic url for querying information", "offline, \" \"check client connection:\" \" offline = %s, \"", "if client is connected if self._data['offline'] and not self._data['temporarilyOffline']: raise", "has not been changed raise AssertionError. 
: param message: optional", "\" \"connection: offline = %s, \" \"temporarilyOffline = %s\" %", "class Node(JenkinsBase): \"\"\" Class to hold information on nodes that", "Init a node object by providing all relevant pointers to", "temporarilyOffline = %s\" % (data['offline'], data['temporarilyOffline'])) def toggle_temporarily_offline(self, message=\"requested from", "= self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll() log.debug(html_result) state = self.is_temporarily_offline() if", "url, data={}) self.poll() log.debug(html_result) state = self.is_temporarily_offline() if initial_state ==", "urlquote except ImportError: # Python3 from urllib.parse import quote as", "log = logging.getLogger(__name__) class Node(JenkinsBase): \"\"\" Class to hold information", "this node offline \"\"\" initial_state = self.is_temporarily_offline() url = self.baseurl", "PostRequired: html_result = self.jenkins.requester.post_and_confirm_status( url, data={}) self.poll() log.debug(html_result) state =", "\"temporarilyOffline = %s\" % (self._data['offline'], self._data['temporarilyOffline'])) def set_offline(self, message=\"requested from", "\"\"\" def __init__(self, baseurl, nodename, jenkins_obj): \"\"\" Init a node" ]
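# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal example, assuming a reachable Jenkins master, of driving the Node
# state helpers above through jenkinsapi. The master URL and node name are
# hypothetical placeholders.
from jenkinsapi.jenkins import Jenkins

jenkins = Jenkins("http://localhost:8080")      # hypothetical master URL
node = jenkins.get_node("build-agent-01")       # hypothetical node name
if node.is_online():
    # set_offline() toggles 'temporarilyOffline' and raises AssertionError
    # if the node is still online afterwards.
    node.set_offline(message="maintenance window")
    assert node.is_temporarily_offline()
    node.set_online()                           # bring the node back when done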
[ "= self.createTimeRange(0, 240) self._ddhhmmTime = self.getCurrentTime( argDict, \"%d%H%M\", shiftToLocal=0, stripLeading=0)", "the public domain, furnished \"as is\", without technical # support,", "self.getCurrentTime( argDict, \"%l%M %p %Z %a %b %e %Y\", stripLeading=1)", "*|\\n\\n\" ## # bullets = bullets + \"\\n\" ## return", "xrange(len(buf)): if x == 0: continue #headlines and text before", "fcst + \"Default overview section\\n\" return fcst def _preProcessArea(self, fcst,", "to 1 to automatically write product to file # Area", ">= 2: time = bullets[1] else: time = None if", "impact = None if len(regText) == 0: regText = None", "\"|* \" + textToUse + \" *|\" # add bullet", "### offices that use \"-20 degrees\" in the text. ###", "accurateCities = self._accurateCities) fcst = fcst + areaHeader return fcst", "Tags fcst = re.sub(r'\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\s*&&\\n', \\ \"\", fcst) fcst =", "# of bullets and text. The multipleRecords is set to", "feed term. # of the text regText = \"\" #regular", "always removed. # Framing codes are added if specified. #", "/ or modified by Raytheon Company, # pursuant to Contract", "if eh['sig'] not in foundSig: foundSig.append(eh['sig']) includeFrameCodes = 0 includeText", "# 05/07/2015 4027 randerso Migrated A1 OB9.16 code to A2", "bulletFlag: print \"appending to bottom list of bullets!\" segmentTextSplit =", "pairs hazardBodyPhrase = re.sub(r'&&\\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n', \\ \"\", hazardBodyPhrase) return hazardBodyPhrase", "hazardList: if (each.has_key('prevOverviewText') and each.has_key('pil') and each.has_key('endTime') and each.has_key('act')): if", "else: segmentText = segmentText # # If segment passes the", "fraction = 0 fractionOne = 1.0/float(len(segmentList)) percent = 50.0 self.setProgressPercentage(50)", "endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazName = self.hazardName(eachHazard['hdln'], argDict, False) hazardBodyPhrase", "ent in foundCTAs: #only process CTAs that are vtec phen/sig", "overridden. 
## #------------------------------------------------------------------------- # Description: This product is a template", "breakStr=[\" \", \"-\", \"...\"]) return processedText, foundCTAs def decodeBulletedText(self, prevText):", "Company, # pursuant to Contract DG133W-05-CQ-1067 with the US Government.", "def _determineTimeRanges(self, argDict): # Set up the time range for", "bulletFlag = 0 # # Now if there is a", "foundCTAs: #only process CTAs that are vtec phen/sig based if", "== 2: phensig = (ent[0:2], ent[3]) #phen.sig if phensig in", "phraseCount == 1: phraseCount = 2 if hdln != lastHdln:", "set of entries in # a segment, thus double events", "r\"\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n\") fcst = string.replace(fcst, \"\\n \",\"\\n\") fcst = string.replace(fcst,", "action # newList = [] canList = [] expList =", "is for upgrade hazards # for eachHazard in upgList: if", "(inc capture text, inc framing codes, skip CTAs, forceCTAList) #", "# # If an overview exists for this product, insert", "overview section \"bulletProd\": 0, # do not default to bullets", "section\\n\" return fcst def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict): #", "Mail Stop B8 # Omaha, NE 68106 # 402.291.0100 #", "\"&&\", \"\\n&&\\n\") # Prevent empty Call to Action Tags fcst", "\"fullStationID\": \"<fullStationID>\", # full station identifier (4letter) \"wmoID\": \"<wmoID>\", #", "+ 1 end = len(self._bulletOrder()) bulletFlag = 1 for i", "types, copy, re import CallToActions import AbsTime class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,", "= self._bulletOrder() staticBulletOrder = self._bulletOrder() for bullet in staticBulletOrder: print", "not in foundSig: foundSig.append(eh['sig']) includeFrameCodes = 0 includeText = 1", "= 0 foundCANS = 0 foundSig = [] for eh", "needed. for bullet in removeBulletList: if re.search(\"\\* \"+ bullet +", "hazardBodyPhrase + '\\n\\n' ctas = [] for (phen,sig) in forceCTAList:", "= 0 lastHdln = None for eachHazard in newList: hdln", "= self._wmoID + \" \" + self._fullStationID + \" \"", "1 if phraseCount == 0: phraseCount = 1 if eachHazard['phen']", "+ processedText + \"*|\\n\" # Wrap processedText = self.endline(processedText, linelength=self._lineLength,", "+ self._fullStationID + \" \" + \\ self._ddhhmmTime + \"\\n\"", "we only # want text from the last in the", "\"FOUS45\" # pil Product pil, such as \"SFTBOS\" # areaName", "Contractor Address: 6825 Pine Street, Suite 340 # Mail Stop", "eachHazard['act'] == 'CAN': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Wrap-up", "+ '\\n\\n' ctas = [] for (phen,sig) in forceCTAList: hazardPhenSig", "is for statement hazards # for eachHazard in statementList: hazardBodyPhrase", "continue #no defined headline, skip phrase hazName = self.hazardName(eachHazard['hdln'], argDict,", "the text ### bullets = [] bullets = string.split(prevText, '\\n\\n')", "text # print \"hazardBodyText info: incTextFlag: \",incTextFlag if incTextFlag: print", "+ \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) segmentText = string.join(segmentTextSplit,\"* \" + bullet.upper()", "in keepBulletList and canBullet not in removeBulletList: removeBulletList.append(canBullet) print \"hazardBodyText", "easPhrase Optional EAS phrase to be include in product header", "effect\" + endTimePhrase + \". 
\" elif phraseCount == 1:", "\\ \" has also been issued\" + endTimePhrase + \".", "TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis, CallToActions.CallToActions): Definition = { \"type\": \"smart\", \"displayName\": None,", "in ['UPG']: upgList.append(eachHazard) else: conList.append(eachHazard) # # Now, go through", "is restricted by U.S. law. Dissemination # to non-U.S. persons", "hazardTimePhrases(self, hazard, argDict, prefixSpace=True): timeWords = self.getTimingPhrase(hazard, argDict['creationTime']) if prefixSpace", "product endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazName = self.hazardName(eachHazard['hdln'], argDict, False)", "then # we may have to add bullets. # if", "= string.join(segmentTextSplit,\"\") if removeBulletList != []: segmentText = \"|*\\n\" +", "fractionOne fcst = self._postProcessProduct(fcst, argDict) return fcst def _getVariables(self, argDict):", "No attribution for this case if it is a bullet", "be included in the area header \"easPhrase\" :\"\", # Optional", "includeOverviewHeadline If 1, the overview header is templated # includeOverview", "to non-U.S. persons whether in the United States or abroad", "= None #no regular text after bullets return (hazard, time,", "\"type\": \"smart\", \"displayName\": None, # Source database for product. Can", "print 20* \"*\" + (eachHazard['phen']) ## bList = newBulletList.split(\",\") ##", "\"Always\": textToUse = \"|* \" + textToUse + \" *|\"", "after # product creation. # # lineLength max length of", "+ \"Default overview section\\n\" return fcst def _preProcessArea(self, fcst, segmentAreas,", "None: return prevText ### ### split the text ### bullets", "\"KSLC\". # wmoID WMO ID code for product header, such", "self._getVariables(argDict) if error is not None: return error # Get", "(don't include text) if foundCANS and not foundACTS: if 'S'", "autoSend is 1. # This value is also used for", "bullets!\" segmentTextSplit = re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", segmentText, flags=re.IGNORECASE) segmentText = \"\\n\"", "False, skipCTAs = skipCTAs) # # Check that the segment", "eachHazard['act'] not in [\"CAN\",\"EXP\"]: saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for saveBullet in", "for it in items: if type(it) == types.TupleType: it =", "+ \". \" else: hazardBodyPhrase += \" has issued \"", "\"defaultEditAreas\" : \"EditAreas_PublicZones_<site>_<MultiPil>\", # product identifiers \"productName\": \"Generic Hazard Product\",", "d[k] items = func() for it in items: if type(it)", "it needs to be a Statement (sig=\"S\") cans = ['CAN','UPG','EXP']", "# # DEFINITION SECTION # # Required Configuration Items: #", "checks, put in framing codes else: hazardBodyPhrase = hazardBodyPhrase +", "is not intended to be overridden. ## #------------------------------------------------------------------------- # Description:", "+ s.upper() s = eas + productName + \"\\n\" +\\", "hazName + \\ \" has been cancelled. \" # #", "it print outText return outText # The _hazardTimePhrases method is", "eachHazard in hazardList: if eachHazard['sig'] == each: if eachHazard not", "text, defined by the double line feed term. # of", "= bullets[1] else: time = None if len(bullets) >= 3:", "now in effect\" + endTimePhrase + \". 
\" # #", "eachHazard in sortedHazardList: if eachHazard.has_key('prevText'): prevText = eachHazard['prevText'] if eachHazard['pil']", "DEFINITION SECTION # # Required Configuration Items: # # displayName", "\"Georgia\" -- optional \"wfoCityState\": \"<wfoCityState>\", # Location of WFO -", "Full station identifier, 4 letter, such as \"KSLC\". # wmoID", "self._expireTime, argDict) fcst = self._makeProduct(fcst, segmentAreas, argDict) fcst = self._postProcessArea(fcst,", "# product to the AWIPS WAN. The product is not", "#eliminate following lines break regText = (\"\\n\").join(lines) # now clean", "code # is more complicated for ent in foundCTAs: #only", "# fullStationID Full station identifier, 4 letter, such as \"KSLC\".", "# we may have to add bullets. # if incTextFlag", "# purgeTime Maximum number of hours past issuance time for", "it, include text) elif foundCANS and foundACTS: includeFrameCodes = 1", "## bList = newBulletList.split(\",\") ## ## ### initialize the bullet", "## #------------------------------------------------------------------------- # Description: This product is a template for", "if eachHazard.has_key('prevText'): prevText = eachHazard['prevText'] if eachHazard['pil'] == 'MWS': startPara", "1 #capture text, but frame it else: includeText = 0", "# This is for upgrade hazards # for eachHazard in", "#set to 1 to automatically store product in textDB \"autoWrite\":", "= 1.0/float(len(segmentList)) percent = 50.0 self.setProgressPercentage(50) for segmentAreas in segmentList:", "= self.getTimingPhrase(eachHazard, expTimeCurrent) hazardBodyPhrase = hazardBodyPhrase + \"The \" +", "if eh['act'] in ['NEW'] and len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) return (includeText,", "the lines overview = overviewHeadline + overviewBody return overview else:", "End code for DR 21310 # # This adds the", "# # This adds the call to action statements. This", "= 0 if eachHazard['act'] == 'CAN': hazardBodyPhrase = hazardBodyPhrase +", "endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict, True) hazNameACap", "hazNameA + forPhrase + \" has been issued. This \"", "header \"cityLocation\": \"CityLocation\", # City lat/lon dictionary to use \"cityDescriptor\":\"Including", "product. # Product is saved if autoWrite is 1. #", "#set to 1 to automatically write product to file #", "key + \"= self._definition[key]\" # Get VariableList varDict = argDict[\"varDict\"]", "licensing information. ## # ---------------------------------------------------------------------------- # # SOFTWARE HISTORY #", "use a bullet format #------------------------------------------------------------------------- # Weather Elements Needed: #", "+ \"\\n\\n\" else: ### not a bullet, CTA text outText", "eachHazard['act'] in ['CAN']: canList.append(eachHazard) elif eachHazard['act'] in ['EXP']: expList.append(eachHazard) elif", "= 1 hazardBodyPhrase = hazardBodyPhrase + \\ \" has cancelled", "\", newBullets print \"segment text is: \", segmentText for bullet", "software product contains export-restricted data whose # export/transfer/disclosure is restricted", "if autoWrite is 1. 
# debug If on, debug_print statements", "not None: return error # Get the segments hazardsC =", "else: hazardBodyPhrase += \"In addition, \" + \\ hazNameA +", "and len(capText): textToUse = capText[0].upper()+capText[1:] if frameit == \"Always\": textToUse", "in sortedHazardList: if eachHazard['act'] in [\"CAN\",\"EXP\"]: canBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for", "grids # citiesPhrase \"Including the cities of\" phrase used when", "_indentBulletText(self, prevText): print prevText ### if previous text is empty,", "Generate the product for each segment in the segmentList fraction", "\"= varDict[key]\" self._language = argDict[\"language\"] # Set up information for", "considered, the 'hdln' value must be # present in the", "continue #headlines and text before the bullets bullets.append(buf[x]) # find", "= fcst + \"Default overview section\\n\" return fcst def _preProcessArea(self,", "# # First, sort the hazards for this segment by", "# to non-U.S. persons whether in the United States or", "City,state that the WFO is located in, such as \"Buffalo", "the default GUI entry for # storage. # awipsWANPil Defines", "prevText is None: return prevText ### ### split the text", "canList: if len(eachHazard['hdln']) == 0: continue #no defined headline, skip", "\" \" + \\ self._ddhhmmTime + \"\\n\" + self._pil +", "short or blank # if len(segmentText) < 6: incTextFlag =", "list, or it needs to be a Statement (sig=\"S\") cans", "to be a Statement (sig=\"S\") cans = ['CAN','UPG','EXP'] acts =", "# for eachHazard in statementList: hazardBodyPhrase = \"...|* Add statement", "be defined or the GFE zone combiner # database Source", "TextRules import SampleAnalysis import time, string, types, copy, re import", "\"EditAreas_PublicZones_<site>_<MultiPil>\", # product identifiers \"productName\": \"Generic Hazard Product\", # product", "set was captured/decoded. # (hazard, time, basis, impact, afterText, multipleRecords)", "processedText + eachPara + '\\n\\n' #keep track of remaining CTAs", "if newBullet not in newBulletList: newBulletList.append(newBullet) print \"my bullets are:", "paragraphs = self.convertSingleParas(text) for x in xrange(len(paragraphs)): paragraphs[x] = string.replace(paragraphs[x],'", "=\"\" if nwsIntroUsed == 0: hazardBodyPhrase = \"The National Weather", "of map background for creating Combinations # Can be: #", "argDict[\"language\"] # Set up information for Hazards product self._hazards =", "hazName + \". 
\" else: hazardBodyPhrase = hazardBodyPhrase + \"The", "case if it is a bullet product endTimePhrase = self.hazardTimePhrases(eachHazard,", "formats of these lists are different, thus this code #", "self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict, True) hazNameACap = self.sentence(hazNameA,", "and previous segment Text, then # we may have to", "fcst = self._indentBulletText(fcst) # # Clean up multiple line feeds", "incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \\ self.useCaptureText(sortedHazardList) # # #", "removeLF = re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL) bullet = removeLF.sub(r'\\1 \\2',b) ### indent", "elif eachHazard['act'] in ['CAN']: canList.append(eachHazard) elif eachHazard['act'] in ['EXP']: expList.append(eachHazard)", "the bullets and format the output ## for b in", "to 1, then the product will be automatically # written", "= 0 fractionOne = 1.0/float(len(segmentList)) percent = 50.0 self.setProgressPercentage(50) for", "+ bullet.upper() + \\ \"...|* Enter bullet text *|\\n\\n* \"", "diction newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for newBullet in newBullets: if newBullet", "AWIPS text database. The product is not # automatically stored", "startPara, addFramingCodes = False, skipCTAs = skipCTAs) tester = segmentText[0]", "headlines = self.generateProduct(\"Hazards\", argDict, area = editArea, areaLabel=areaLabel, timeRange =", "= \"\" for eachHazard in sortedHazardList: ### get the default", "canBullet not in removeBulletList: removeBulletList.append(canBullet) print \"hazardBodyText info: keepBulletList: \",keepBulletList", "saved if autoWrite is 1. # debug If on, debug_print", "Generate Text Phrases for a list of edit areas #", "sets of bullets. In this case # only the 1st", "del forceCTAList[forceCTAList.index(phensig)] hazardBodyPhrase = hazardBodyPhrase + '\\n\\n' ctas = []", "#set to 1 to automatically transmit product \"autoSendAddress\": \"000\", #transmission", "for it in ctas: if type(it) == types.TupleType: it =", "to \"The National Weather Service\". Note # that this only", "(4letter) \"wmoID\": \"<wmoID>\", # WMO ID \"pil\": \"<pil>\", # Product", "# Added for DR 21309 def _bulletOrder(self): return [] ##", "(BYZ) # # Now if there is a new hazard", "entry for storage. # autoSend If set to 1, then", "and Local file names and Locations: # GenericHazards #------------------------------------------------------------------------- #", "in \" +\\ self._wfoCity nwsIntroUsed = 1 hazardBodyPhrase = hazardBodyPhrase", "canBullets: if canBullet not in keepBulletList and canBullet not in", "text goes here *|.\\n\" elif eachHazard['act'] == 'EXP': hazardBodyPhrase =", "doesn't pass the checks, put in framing codes else: hazardBodyPhrase", "\"\\n\\n\" fcst = fcst + s fcst = fcst +", "if skipCTAs and len(found): pass else: processedText = processedText +", "+ string.join(segmentTextSplit,\"* \" + bullet.upper() + \\ \"...|* Enter bullet", "to remove. removeBulletList = [] for eachHazard in sortedHazardList: if", "= argDict['creationTime'] timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent) hazardBodyPhrase = hazardBodyPhrase +", "bullets and text. 
The multipleRecords is set to 1 in", "want text from the last in the series of bullets", "for Hazards product self._hazards = argDict['hazards'] self._combinations = argDict[\"combinations\"] return", "Enter bullet text *|\\n\\n\" ## # bullets = bullets +", "are vtec phen/sig based if ent.find('.') == 2: phensig =", "hazardBodyPhrase = hazardBodyPhrase + c + '\\n\\n' hazardBodyPhrase = hazardBodyPhrase", "hazardSamplingThreshold Defines the percentage coverage or number of # grid", "r\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", \\ r\"\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n\") fcst = string.replace(fcst, \"\\n \",\"\\n\")", "we found a bullet if re.match(\"\\*\", b): ### remove line", "if len(bullets) > x+2: #more bullets are present multRecords =", "which #nothing is wrapped in framing codes, \"Always\" in which", "line feeds in the CAP tags to keep separate from", "segmentTextSplit = re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", segmentText, flags=re.IGNORECASE) segmentText = \"\\n\" +", "the cities of\" phrase used when including # cities #", "National Weather Service in \" + self._wfoCity nwsIntroUsed = 1", "text, wraps it preserving blank lines, # then returns the", "0 foundCANS = 0 foundSig = [] for eh in", "for statement hazards # for eachHazard in statementList: hazardBodyPhrase =", "as to its usefulness for # any purpose. #------------------------------------------------------------------------- #", "Make sure there is only one CAP tag pairs hazardBodyPhrase", "upgList.append(eachHazard) else: conList.append(eachHazard) # # Now, go through each list", "= ctao.genericCTAs() for it in ctas: if type(it) == types.TupleType:", "exists # foundCTAs = [] for eachHazard in sortedHazardList: if", "self._makeProduct(fcst, segmentAreas, argDict) fcst = self._postProcessArea(fcst, segmentAreas, argDict) fraction =", "# This software is in the public domain, furnished \"as", "21309 def _bulletOrder(self): return [] ## Replaced by 21309 code", "== each: if eachHazard not in sortedHazardList: sortedHazardList.append(eachHazard) # #", "### ### split the text ### bullets = [] bullets", "in sortedHazardList: ### get the default bullets for all hazards", "finished product. # Product is saved if autoWrite is 1.", "self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) segmentText = string.join(segmentTextSplit,\"* \" +", "func = d[k] items = func() for it in items:", "fcst, segmentAreas, expireTime, argDict): # This is the header for", "%e %Y\", stripLeading=1) return None def _preProcessProduct(self, fcst, argDict): #", "productName defines name of product e.g. \"Zone Forecast Product\" #", "letter, such as \"KSLC\". 
# wmoID WMO ID code for", "Items # # mapNameForCombinations Name of the map background that", "def _bulletOrder(self): return [] ## Replaced by 21309 code ##", "fcst = string.replace(fcst, \\ r\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", \\ r\"\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n\") fcst", "or blank # if len(segmentText) < 6: incTextFlag = 0", "sortedHazardList: if not eachHazard.has_key('prevText'): newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") print \"newBullets =", "Omaha, NE 68106 # 402.291.0100 # # See the AWIPS", "segment # # remove items from forceCTAList if they exist", "the # event that there are multiple sets of bullets.", "phraseCount = 0 for eachHazard in expList: if len(eachHazard['hdln']) ==", "to the \"output\" named disk file after # product creation.", "finish progress meter self.setProgressPercentage(100) self.progressMessage(0, 100, self._displayName + \" Complete\")", "In this case # only the 1st set was captured/decoded.", "def __init__(self): TextRules.TextRules.__init__(self) SampleAnalysis.SampleAnalysis.__init__(self) self.__overviewText = \"\" self.__procCTA = None", "storage. # awipsWANPil Defines the awips product identifier # (e.g.,", "automatically # stored into the text database using the \"textdbPil\"", "this case if it is a bullet product hazName =", "been issued\" + endTimePhrase + \". \" else: if eachHazard['phen']", "paragraphs = ptext.split('\\n') return paragraphs def ctasFound(self, text): #returns types", "for product. Can be \"Official\", # \"Fcst\" or \"ISC\" #", "of paragraphs based on the input text. lf = re.compile(r'(s*[^\\n])\\n([^\\n])',", "= hdln # # This is for the can hazards", "# The method hazardBodyText creates an attribution phrase # def", "= [] for eachHazard in sortedHazardList: if eachHazard['act'] in [\"CAN\",\"EXP\"]:", "is more complicated for ent in foundCTAs: #only process CTAs", "next bullet or up to \"The National Weather Service\". Note", "for returning the values if len(bullets) >= 1: hazard =", "return self.indentText(textToUse, indentFirstString = '', indentNextString = ' ', maxWidth=self._lineLength,", "the exp hazards # phraseCount = 0 for eachHazard in", "== 0: phraseCount = 1 if eachHazard['phen'] in ['HU', 'TR',", "elif eachHazard['act'] in ['EXP']: expList.append(eachHazard) elif eachHazard['act'] in ['EXT']: extList.append(eachHazard)", "\" is in effect\" + endTimePhrase + \". \" lastHdln", "text. ### outText = outText + bullet + \"\\n\\n\" else:", "if eachHazard['sig'] in ['S']and eachHazard['phen'] in ['CF', 'LS']: statementList.append(eachHazard) elif", "National Weather Service in \" +\\ self._wfoCity nwsIntroUsed = 1", "Needed: None #------------------------------------------------------------------------- # Associated Utilities Files e.g. Combinations file:", "algorithms in DiscretePhrases. # def hazardTimePhrases(self, hazard, argDict, prefixSpace=True): timeWords", "or eh['sig'] == 'S'): foundCANS = 1 if eh['sig'] not", "if eachHazard not in sortedHazardList: sortedHazardList.append(eachHazard) # # Next, break", "re.search(\"\\* \"+ bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not None:", "= hazardBodyPhrase + \\ \"\\n\\n|* Statement text goes here *|.\\n\\n\"", "+ \\ self._ddhhmmTime + \"\\n\" + self._pil + \"\\n\\n\" fcst", "hazardBodyPhrase = hazardBodyPhrase + \\ 'PRECAUTIONARY/PREPAREDNESS ACTIONS...\\n\\n' for c in", "= processedText + eachPara + '\\n\\n' #keep track of remaining", "# expire time. 
# includeCities If 1, cities will be", "hazName + \\ \" is now in effect\" + endTimePhrase", "'MWS': startPara = 0 else: startPara = 1 segmentText, foundCTAs", "variable. If capText is None or 0 length, then #the", "## Can be: ## EditAreas_PublicZones_BOU ## EditAreas_FireWx_BOU ## EditAreas_FIPS_BOU ##", "= AbsTime.AbsTime(argDict['creationTime']) self._currentTime = argDict['creationTime'] self._expireTime = self._issueTime + self._purgeTime*3600", "\"pil\": \"<pil>\", # Product pil \"areaName\": \"\", # Name of", "database for product. Can be \"Official\", \"Fcst\" or \"ISC\" \"database\":", "they exist in foundCTAs. Note # that the formats of", "Zone names will be included in the area header \"easPhrase\"", "else: startPara = 2 segmentText, foundCTAs = self.cleanCapturedText(prevText, startPara, addFramingCodes", "if bullet not in newBulletList: bulletOrder.remove(bullet) print \"reordered bullets are:", "has been issued. This \" + hazName + \\ \"", "(\"Master Rights File.pdf\") for # further licensing information. ## #", "1 if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] ==", "frameit=\"Never\"): #returns a properly formatted bulleted text based on #the", "is for the new hazards # phraseCount = 0 lastHdln", "returns # time phrase wording consistent with that generated by", "text or None, returns the # regular text after the", "Dissemination # to non-U.S. persons whether in the United States", "hazardList, argDict): bulletProd = self._bulletProd hazardBodyPhrase = '' # #", "active entries, captured text is used, but still # need", "Optional EAS phrase to be include in product header #", "\"Always\" or frameit == \"DefaultOnly\": textToUse = \"|* \" +", "bulletOrder = self._bulletOrder() staticBulletOrder = self._bulletOrder() for bullet in staticBulletOrder:", "defines edit areas, default is Combinations # # purgeTime Maximum", "on the AWIPS WAN to the \"autoSendAddress\" with # the", "+ self._wfoCityState + \\ \"\\n\" + issuedByString + self._timeLabel +", "saveBullet not in keepBulletList: keepBulletList.append(saveBullet) # Now determine which bullets", "write product to file # Area Dictionary -- Descriptive information", "\" + self._fullStationID + \" \" + \\ self._ddhhmmTime +", "dictionary and split the bullets ## bDict = self._bulletDict() ##", "can be \"Never\", in which #nothing is wrapped in framing", "that generated by the headline # algorithms in DiscretePhrases. 
#", "s fcst = fcst + \"Default overview section\\n\" return fcst", "ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) d = ctao.ctaPilDict() for k in d.keys():", "If segment passes the above checks, add the text #", "if the segment is 'NEW' or if the previous text", "fcst = re.sub(r'\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\s*&&\\n', \\ \"\", fcst) fcst = self._indentBulletText(fcst)", "\" + \\ hazNameA + \" has been issued.\" else:", "# Now if there is a can/exp hazard and previous", "= self.cleanCapturedText( overview, 0) break def useCaptureText(self, hazardList): #Based on", "argDict): bulletProd = self._bulletProd hazardBodyPhrase = '' # # First,", "ptext = ptext.replace('\\n\\n', '\\n') paragraphs = ptext.split('\\n') return paragraphs def", "bList: ## bullets = bullets + \"* \" + b", "= [] for eh in hazardList: if eh['act'] in acts", "overview header \"includeOverview\": 1, #include overview section \"bulletProd\": 0, #", "= \", newBullets print \"segment text is: \", segmentText for", "in textDB \"autoWrite\": 0, #set to 1 to automatically write", "from the last in the series of bullets to the", "= CallToActions.CallToActions() d = ctao.ctaDict() for k in d.keys(): func", "= None def generateForecast(self, argDict): # Generate Text Phrases for", "bullets + \"* \" + b.upper() + \"...|* Enter bullet", "2 if hdln != lastHdln: if eachHazard['phen'] in ['HU', 'TR',", "for x in xrange(len(buf)): if x == 0: continue #headlines", "hazardBodyPhrase + \"The \" + hazName + \\ \" has", "# fixMultiLF = re.compile(r'(\\n\\n)\\n*', re.DOTALL) fcst = fixMultiLF.sub(r'\\1', fcst) #", "= editArea, areaLabel=areaLabel, timeRange = self._timeRange) fcst = fcst +", "# Generate Text Phrases for a list of edit areas", "0: eas = self._easPhrase + '\\n' else: eas = ''", "text for f in found: if f not in foundCTAs:", "the default if len(self.__overviewText) == 0: if self._includeOverviewHeadline: overviewHeadline =", "### bullets = [] bullets = string.split(prevText, '\\n\\n') if len(bullets)", "' ', maxWidth=self._lineLength, breakStrings=[\" \", \"-\", \"...\"]) def convertSingleParas(self, text):", "# Product is saved if autoWrite is 1. # debug", "block of text, wraps it preserving blank lines, # then", "#make list of call to actions (type, cta text) if", "is saved if autoWrite is 1. # debug If on,", "header, such as \"Western New York\" # wfoCityState City,state that", "do not default to bullets \"hazardSamplingThreshold\": (10, None), #(%cov, #points)", "Elements Needed: # Hazards #------------------------------------------------------------------------- # Edit Areas Needed: None", "# Name of map background for creating Combinations # Can", "\" not in segmentText\" start = self._bulletOrder().index(bullet) + 1 end", "if len(segmentList) == 0: return \"No hazards to report\" #", "if eachPara.find('...') == 0: pass #ignore headlines paraCount = paraCount", "bullets return (hazard, time, basis, impact, regText, multRecords) def substituteBulletedText(self,", "to add bullets. # if incTextFlag and bulletProd: for eachHazard", "for this product, insert it # overview = self.finalOverviewText() overviewSearch", "#no defined headline, skip phrase endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA", "print outText return outText # The _hazardTimePhrases method is passed", "the \"autoSendAddress\" with # the \"awipsWANPil after product creation. #", "database. 
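# [Illustrative aside] The storage/transmission switches described here
# live in the Definition dictionary below; a minimal excerpt with the
# shipped defaults (the <...> tokens are replaced per site, and
# exampleDisseminationDefaults is only an illustration, unused here):
exampleDisseminationDefaults = {
    "textdbPil": "<textdbPil>",      # text-database product ID
    "awipsWANPil": "<awipsWANPil>",  # WAN product ID
    "autoStore": 0,  # 1 = store automatically in the text database
    "autoWrite": 0,  # 1 = write product to the named output file
    "autoSend": 0,   # 1 = transmit automatically on the AWIPS WAN
    "autoSendAddress": "000",  # transmission address
}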
The product is not # automatically stored unless autoStore", "+ hazName + forPhrase + \\ \" is in effect\"", "\\ \" is now in effect\" + endTimePhrase + \".", "for bullet in removeBulletList: if re.search(\"\\* \"+ bullet + \"\\.\\.\\.\",", "self.ctasFound(eachPara) #get list of ctas found if skipCTAs and len(found):", "is not None and len(capText): textToUse = capText[0].upper()+capText[1:] if frameit", "defaultEditAreas defines edit areas, default is Combinations # # purgeTime", "wfoCityState City,state that the WFO is located in, such as", "'\\n') paragraphs = ptext.split('\\n') return paragraphs def ctasFound(self, text): #returns", "area header \"accurateCities\": 0, # Include all cities in area", "basis, impact, afterText, multipleRecords) if prevText is None: return (None,", "\"cityLocation\": \"CityLocation\", # City lat/lon dictionary to use \"cityDescriptor\":\"Including the", "\"\\n\\n\" else: ### not a bullet, CTA text outText =", "in segmentText\" start = self._bulletOrder().index(bullet) + 1 end = len(self._bulletOrder())", "is the pil (e.g., ZFP), #phen/sig (e.g., DU.Y), or GENERIC.", "and Locations: # GenericHazards #------------------------------------------------------------------------- # Customization Points: # #", "Local file names and Locations: # GenericHazards #------------------------------------------------------------------------- # Customization", "for eachHazard in sortedHazardList: if eachHazard['act'] not in [\"CAN\",\"EXP\"]: saveBullets", "'TY']: hazardBodyPhrase = hazardBodyPhrase + \" has issued \" +", "!= lastHdln: if eachHazard['phen'] in ['HU', 'TR', 'TY']: hazardBodyPhrase =", "CTAs fcst = string.replace(fcst, \\ r\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", \\ r\"\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n\")", "== 0: return \"No hazards to report\" # Determine time", "Product\", # product name \"fullStationID\": \"<fullStationID>\", # full station identifier", "any, use the default if len(self.__overviewText) == 0: if self._includeOverviewHeadline:", "addFramingCodes = False, skipCTAs = False): # # This method", "self.generateProduct(\"Hazards\", argDict, area = editArea, areaLabel=areaLabel, timeRange = self._timeRange) fcst", "# # This method finds an overview in the previous", "# # Now, go through each list and build the", "needs to be a Statement (sig=\"S\") cans = ['CAN','UPG','EXP'] acts", "for x in xrange(len(lines)): if lines[x].find('The National Weather Service') ==", "multRecords) def substituteBulletedText(self, capText, defaultText, frameit=\"Never\"): #returns a properly formatted", "product contains export-restricted data whose # export/transfer/disclosure is restricted by", "self.__procCTA = None def generateForecast(self, argDict): # Generate Text Phrases", "\" is no longer in effect. 
\" # # This", "= 1 if eh['act'] in cans and (len(eh['hdln']) or eh['sig']", "captured text or not # incTextFlag, incFramingCodes, skipCTAs, forceCTAList =", "need to get headlines for the first edit area #", "= 0 for eachHazard in expList: if len(eachHazard['hdln']) == 0:", "all bullets bulletOrder = self._bulletOrder() staticBulletOrder = self._bulletOrder() for bullet", "feeds # fixMultiLF = re.compile(r'(\\n\\n)\\n*', re.DOTALL) fcst = fixMultiLF.sub(r'\\1', fcst)", "phrase hazName = self.hazardName(eachHazard['hdln'], argDict, False) hazardBodyPhrase = hazardBodyPhrase +", "ACTIONS\\.\\.\\.\\n', \\ \"\", hazardBodyPhrase) return hazardBodyPhrase def finalOverviewText(self): #if didn't", "\"Official\", # Defines output location of finished product. \"outputFile\": \"{prddir}/TEXT/genHaz.txt\",", "#------------------------------------------------------------------------- # Associated Utilities Files e.g. Combinations file: # Combinations", "# Marine_Zones_BOU \"mapNameForCombinations\": \"Zones_<site>\", ## Edit Areas: Create Combinations file", "eachHazard['act'] in ['EXP']: expList.append(eachHazard) elif eachHazard['act'] in ['EXT']: extList.append(eachHazard) elif", "string.join(segmentTextSplit,\"* \" + bullet.upper() + \\ \"...|* Enter bullet text", "\".|*Overview (must edit)*|.\\n\\n\" else: overviewBody = \"\" #assemble the lines", "only correctly handles the 1st set of entries in #", "# This is for statement hazards # for eachHazard in", "in removeBulletList: if re.search(\"\\* \"+ bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE)", "hazardPhenSig = phen + \".\" + sig cta = self.defaultCTA(hazardPhenSig)", "') if len(buf) <= 1: return (None, None, None, None,", "statement headline *|...\\n\\n\" # # This adds segment text #", "# This adds the call to action statements. This is", "None, None) multRecords = 0 #indicator of multiple sets of", "properly formatted bulleted text based on #the capText variable. If", "Combinations file #------------------------------------------------------------------------- # Component Products: # Hazards #------------------------------------------------------------------------- #", "return None def _preProcessProduct(self, fcst, argDict): # Product header if", "comparison to be case-insensitive just in case # the site", "overview = overviewHeadline + overviewBody return overview else: return self.__overviewText", "ACTIONS\\.\\.\\.\\n\") fcst = string.replace(fcst, \"\\n \",\"\\n\") fcst = string.replace(fcst, \"&&\",", "# \"Fcst\" or \"ISC\" # outputFile Defines the output location", "# only the 1st set was captured/decoded. # (hazard, time,", "to AWIPS WAN. \"periodCombining\" : 0, # If 1, combine", "city,state \"textdbPil\": \"<textdbPil>\", # Product ID for storing to AWIPS", "#ignore headlines paraCount = paraCount + 1 # Add framing", "# Additional Information: #------------------------------------------------------------------------- # Example Output: #------------------------------------------------------------------------- import LogStream", "= re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", segmentTextSplit[1], 1, flags=re.IGNORECASE) if len(segmentTextSplit2) == 2:", "bulletFlag = 1 ## print \"bulletFlag is: \",bulletFlag if bulletFlag:", "<= 1: return (None, None, None, None, None, None) multRecords", "remains in effect\" + endTimePhrase + \". 
\" # #", "Date Ticket# Engineer Description # ------------ ---------- ----------- -------------------------- #", "is for the exp hazards # phraseCount = 0 for", "ID code for product header, such as \"FOUS45\" # pil", "addPeriod=False) hazName = self.hazardName(eachHazard['hdln'], argDict, False) if hazName in [\"Winter", "exp hazards # phraseCount = 0 for eachHazard in expList:", "'\\n\\n') if len(bullets) <= 1: return prevText ### ### process", "canBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for canBullet in canBullets: if canBullet not", "\\ \"...|* Enter bullet text *|\\n\\n* \" + self._bulletOrder()[i] +", "(\"\\n\").join(lines) # now clean up the text for x in", "= \"\" self.__procCTA = None def generateForecast(self, argDict): # Generate", "in [\"CAN\",\"EXP\"]: canBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for canBullet in canBullets: if", "removeBulletList != []: segmentText = \"|*\\n\" + segmentText + \"*|\"", "be overridden. ## #------------------------------------------------------------------------- # Description: This product is a", "it else: includeText = 0 #end of non statement #", "by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with the", "tuple indicating: # (inc capture text, inc framing codes, skip", "1. # debug If on, debug_print statements will appear. #", "hazards # phraseCount = 0 lastHdln = None for eachHazard", "# Headlines are always removed. # Framing codes are added", "debug If on, debug_print statements will appear. # textdbPil Defines", "# 07/13/2015 4648 randerso Fix bullets in follow up products", "def decodeBulletedText(self, prevText): # returns the bullet paragraph text or", "Maximum hours for expireTime \"includeCities\": 1 , # Cities included", "+ \" has been issued. This \" + hazName +", "if eachHazard['act'] == 'CAN': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|*", "and text. The multipleRecords is set to 1 in the", "None or 0 length, then #the default text is used.", "phraseCount = 2 if hdln != lastHdln: if eachHazard['phen'] in", "\"...\") bulletFlag = 0 if bulletFlag: print \"appending to bottom", "Wrap processedText = self.endline(processedText, linelength=self._lineLength, breakStr=[\" \", \"-\", \"...\"]) return", "sortedHazardList = [] for each in ['W', 'Y', 'A', 'O',", "[] canList = [] expList = [] extList = []", "if 'S' in foundSig and len(foundSig) == 1: #only S", "used to store the product # in the AWIPS text", "hazardBodyPhrase + \\ \"\\n\\n|* Statement text goes here *|.\\n\\n\" #", "found foundCTAs = [] # Process the paragraphs, keep only", "+ \\ \"...|* Enter bullet text *|\\n\\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\") bulletFlag =", "eachHazard in sortedHazardList: if eachHazard['act'] not in [\"CAN\",\"EXP\"]: saveBullets =", "segment by importance # sortedHazardList = [] for each in", "x in xrange(len(bullets)): bullets[x] = string.replace(bullets[x],'\\n',' ') removeLF = re.compile(r'(s*[^\\n])\\n([^\\n])',", "break them into individual lists based on action # newList", "+ \"\\n\\n\" + \\ segmentText + '\\n\\n' elif bulletProd: bulletFlag", "cap) is wrapped in framing codes, or #DefaultOnly\" in which", "Source database for product. Can be \"Official\", # \"Fcst\" or", "defines name of product e.g. \"Zone Forecast Product\" # fullStationID", "was captured/decoded. 
# (hazard, time, basis, impact, afterText, multipleRecords) if", "# Hazards #------------------------------------------------------------------------- # Edit Areas Needed: None #------------------------------------------------------------------------- #", "hazardBodyPhrase + '&&\\n\\n' # Make sure there is only one", "Include all cities in area header \"cityLocation\": \"CityLocation\", # City", "else: ### not a bullet, CTA text outText = outText", "codes else: hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Statement text", "fcst = string.replace(fcst, \"&&\", \"\\n&&\\n\") # Prevent empty Call to", "+ \" *|\" # add bullet codes textToUse = \"*", "False, skipCTAs = False): # # This method takes a", "a list of paragraphs based on the input text. lf", "the output string fcst = \"\" fcst = self._preProcessProduct(fcst, argDict)", "elif eachHazard['act'] == 'EXP': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|*", "#only S includeFrameCodes = 1 #capture text, but frame it", "argDict[\"language\"] = self._language # Generate Narrative Forecast for Edit Area", "None, None, None, None) multRecords = 0 #indicator of multiple", "the breakStrings line above is causing issues with ### offices", "argDict[\"combinations\"] return None def _determineTimeRanges(self, argDict): # Set up the", "= \"\" ## ## ### loop through the bullets and", "is in effect\" + endTimePhrase + \". \" else: if", "add the text # print \"hazardBodyText info: incTextFlag: \",incTextFlag if", "to get headlines for the first edit area # in", "is not None) and bulletFlag: print \"* \" + self._bulletOrder()[i]", "len(bullets) > x+2: #more bullets are present multRecords = 1", "tester == '*': startPara = 1 else: startPara = 2", "cities are determined from grids # citiesPhrase \"Including the cities", "segmentTextSplit2 = string.split(segmentTextSplit[1],\"*\",1) if len(segmentTextSplit2) == 2: segmentTextSplit[1] = \"*\"", "ctas.append(cta) if len(ctas) > 0: hazardBodyPhrase = hazardBodyPhrase + \\", "appear. # textdbPil Defines the awips product identifier # (e.g.,", "len(segmentText) < 6: incTextFlag = 0 # DR 21309 code", "= argDict['hazards'] segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable()) if len(segmentList) == 0: return", "# be defined or the GFE zone combiner # database", "# # Check that the previous text exists # foundCTAs", "# Now, go through each list and build the phrases", "OB9.16 code to A2 # 06/17/2015 4027 dgilling Perform case-insensitive", "+= \"In addition, \" + \\ hazNameA + forPhrase +", "= re.sub(r'&&\\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n', \\ \"\", hazardBodyPhrase) return hazardBodyPhrase def finalOverviewText(self):", "Files e.g. 
Combinations file: # Combinations file #------------------------------------------------------------------------- # Component", "hazardBodyPhrase = hazardBodyPhrase + hazNameA + \\ \" remains in", "# First make list of bullets that we need to", "forceCTAList: hazardPhenSig = phen + \".\" + sig cta =", "something in CANS and something in acts (frame it, include", "processedText = processedText + eachPara + '\\n\\n' #keep track of", "is text up to # the next bullet or up", "textToUse = \"|* \" + textToUse + \" *|\" #", "len(bullets) <= 1: return prevText ### ### process the text", "= \"...|* Add statement headline *|...\\n\\n\" # # This adds", "else: if eachHazard['phen'] in ['HU', 'TR', 'TY']: hazardBodyPhrase = hazardBodyPhrase", "if eh['act'] in acts and \\ (eh['phen'], eh['sig']) not in", "## bLine = bDict.get(eachHazard['phen']) ## print 20* \"*\" + (eachHazard['phen'])", "added if specified. # paras = self.convertSingleParas(text) #single paragraphs #", "### if first character is a * we found a", "= segmentText # # If segment passes the above checks,", "hazardBodyPhrase + \"The \" + hazName + \\ \" is", "the segment is 'NEW' or if the previous text has", "+ \" has issued \" + \\ hazNameA + \".", "This \" + hazName + \\ \" is in effect\"", "authorization. # # Contractor Name: <NAME> # Contractor Address: 6825", "not in removeBulletList: removeBulletList.append(canBullet) print \"hazardBodyText info: keepBulletList: \",keepBulletList print", "to remove bullets. # if incTextFlag and bulletProd: # First", "self._bulletOrder() staticBulletOrder = self._bulletOrder() for bullet in staticBulletOrder: print \"correct", "# an export license or other authorization. # # Contractor", "Hazard Products. #------------------------------------------------------------------------- # Copying: # This software is in", "prefixSpace=True): timeWords = self.getTimingPhrase(hazard, argDict['creationTime']) if prefixSpace and len(timeWords): timeWords", "len(bullets) >= 2: time = bullets[1] else: time = None", "If 1, combine periods, if possible # automatic functions \"autoSend\":", "space return timeWords # # The method hazardBodyText creates an", "data whose # export/transfer/disclosure is restricted by U.S. law. Dissemination", "autoStore is 1. 
This # value is also used for", "0: continue #no defined headline, skip phrase endTimePhrase = self.hazardTimePhrases(eachHazard,", "# SOFTWARE HISTORY # # Date Ticket# Engineer Description #", "textToUse = \"|* \" + textToUse + \" *|\" else:", "= ctao.ctaDict() for k in d.keys(): func = d[k] items", "includeZoneNames = self._includeZoneNames, accurateCities = self._accurateCities) fcst = fcst +", "blank # if len(segmentText) < 6: incTextFlag = 0 #", "impact, regText, multRecords) def substituteBulletedText(self, capText, defaultText, frameit=\"Never\"): #returns a", "headlines for the first edit area # in the segment", "\"includeZoneNames\":1, # Zone names will be included in the area", "a base file that is not intended to be overridden.", "else: overviewBody = \"\" #assemble the lines overview = overviewHeadline", "of bullets to the # beginning of any next NWS", "in sortedHazardList: sortedHazardList.append(eachHazard) # # Next, break them into individual", "eachHazard['phen'] in ['HU', 'TR', 'TY']: hazardBodyPhrase = hazardBodyPhrase + hazNameACap", "#add a leading space return timeWords # # The method", "attribution for this case if it is a bullet product", "due to a CAN/EXP/UPG segment # # remove items from", "\\ (eh['phen'], eh['sig']) not in forceCTAList and \\ len(eh['hdln']): forceCTAList.append((eh['phen'],", "framing codes, \"Always\" in which the #text (default or cap)", "the values to be considered, the 'hdln' value must be", "return fcst def allowedHazards(self): return [] # Added for DR", "\"\" fcst = self._preProcessProduct(fcst, argDict) # Generate the product for", "varDict.keys(): if type(key) is types.TupleType: label, variable = key exec", "if incTextFlag: print \"hazardBodyText info: segmentText: \",segmentText hazardBodyPhrase = hazardBodyPhrase", "basis, impact, regText, multRecords) def substituteBulletedText(self, capText, defaultText, frameit=\"Never\"): #returns", "self.cleanCapturedText(prevText, startPara, addFramingCodes = False, skipCTAs = skipCTAs) # #", "express or implied, as to its usefulness for # any", "must set the following: # # productName defines name of", "text ### outText = \"\" for b in bullets: ###", "Statement (sig=\"S\") cans = ['CAN','UPG','EXP'] acts = ['NEW','EXT','EXA','EXB','CON'] foundACTS =", "# autoWrite If set to 1, then the product will", "bullet dictionary and split the bullets ## bDict = self._bulletDict()", "to 1, then the product will be automatically # sent", "elif phraseCount == 1: phraseCount = 2 if hdln !=", "bullets are: \", bulletOrder for b in bulletOrder: bullets =", "= self.defaultCTA(hazardPhenSig) if cta not in ctas: ctas.append(cta) if len(ctas)", "', maxWidth=self._lineLength, breakStrings=[\" \", \"-\", \"...\"]) def convertSingleParas(self, text): #returns", "areas # Get variables error = self._getVariables(argDict) if error is", "with the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL", "is a template for creating Hazard Products. #------------------------------------------------------------------------- # Copying:", "skipCTAs) # # Check that the segment text isn't very", "is used for # creating/editing the combinations file. 
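# [Illustrative aside] The two Definition entries this paragraph refers
# to, as shipped (BOU is the example site; exampleCombinationsConfig is
# only an illustration, unused here):
exampleCombinationsConfig = {
    "defaultEditAreas": "EditAreas_PublicZones_<site>_<MultiPil>",
    # alternatives: EditAreas_PublicZones_BOU, EditAreas_FireWx_BOU,
    #               EditAreas_FIPS_BOU, EditAreas_MarineZones_BOU
    "mapNameForCombinations": "Zones_<site>",
    # alternatives: Zones_BOU, FireWxZones_BOU, Counties, Marine_Zones_BOU
}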
This must", "= key exec \"self._\" + variable + \"= varDict[key]\" self._language", "\"ISC\" # outputFile Defines the output location of the finished", "1, combine periods, if possible # automatic functions \"autoSend\": 0,", "eh['act'] in ['NEW'] and len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) return (includeText, includeFrameCodes,", "ctaParas = self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) d", "creating Combinations # Can be: # Zones_BOU # FireWxZones_BOU #", "= None for eachHazard in newList: hdln = eachHazard['hdln'] if", "includeText = 1 skipCTAs = 0 forceCTAList = [] #", "in effect\" + endTimePhrase + \". \" else: if eachHazard['phen']", "fcst, argDict): # # If an overview exists for this", "name \"fullStationID\": \"<fullStationID>\", # full station identifier (4letter) \"wmoID\": \"<wmoID>\",", "re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", segmentText, flags=re.IGNORECASE) segmentText = \"\\n\" + string.join(segmentTextSplit,\"* \"", "self.convertSingleParas(text) #single paragraphs # keep track of any call to", "bullet + \" not in segmentText\" start = self._bulletOrder().index(bullet) +", "product for each segment in the segmentList fraction = 0", "return hazardBodyPhrase def finalOverviewText(self): #if didn't calculate any, use the", "\"bulletFlag is: \",bulletFlag if bulletFlag: newBulletList = [] bullets =", "phrase used when including # cities # includeZoneNames If 1,", "be automatically # written to the \"output\" named disk file", "or GENERIC. Uses the CallToAction definitions. #convert text to single", "bullets \"hazardSamplingThreshold\": (10, None), #(%cov, #points) \"callToAction\": 1, } def", "index != -1: regText = bullets[x][index+2:] bullets[x] = bullets[x][0:index] #eliminate", "lastHdln = None for eachHazard in newList: hdln = eachHazard['hdln']", "or abroad requires # an export license or other authorization.", "in newBullets: if re.search(\"\\* \" + bullet + \"\\.\\.\\.\", segmentText,", "hazardBodyPhrase = hazardBodyPhrase + \"\\n\\n\" + \\ segmentText + '\\n\\n'", "self.__overviewText def overviewText(self, hazardList, pil): # # This method finds", "# # Contractor Name: <NAME> # Contractor Address: 6825 Pine", "accurateCities If 1, cities are determined from grids # citiesPhrase", "ones paraCount = 0 processedText = '' for eachPara in", "section for returning the values if len(bullets) >= 1: hazard", "== 1: #only S includeFrameCodes = 1 #capture text, but", "# See the AWIPS II Master Rights File (\"Master Rights", "cleanCapturedText(self, text, paragraphs, addFramingCodes = False, skipCTAs = False): #", "None if len(bullets) >= 3: basis = bullets[2] else: basis", "= self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((\"GENERIC\", string.replace(cta,' ',''))) #compare", "have # the same headlines editArea = segmentAreas[0] areaLabel =", "pil and each['endTime'] > self._currentTime and each['act'] not in ['CAN',", "+ hazName + \\ \" is in effect\" + endTimePhrase", "map background for creating Combinations # Can be: # Zones_BOU", "only one CAP tag pairs hazardBodyPhrase = re.sub(r'&&\\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n', \\", "default text is wrapped. 
if capText is not None and", "Warning\", \"Beach Hazards Statement\"]: forPhrase = \" for |* Enter", "defined headline, skip phrase hazName = self.hazardName(eachHazard['hdln'], argDict, False) hazardBodyPhrase", "= fcst + s.upper() s = eas + productName +", "lf.sub(r'\\1 \\2', text) ptext = ptext.replace('\\n\\n', '\\n') paragraphs = ptext.split('\\n')", "+ \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag: print", "# Zones_BOU # FireWxZones_BOU # Counties # Marine_Zones_BOU \"mapNameForCombinations\": \"Zones_<site>\",", "'\\n\\n' #keep track of remaining CTAs in processed text for", "Hazard Product\", # product name \"fullStationID\": \"<fullStationID>\", # full station", "\"*|\" else: segmentText = segmentText # # If segment passes", "areas in the segment have # the same headlines editArea", "'TY']: hazardBodyPhrase += \"In addition, \" + \\ hazNameA +", "self._bulletProd hazardBodyPhrase = '' # # First, sort the hazards", "# ---------------------------------------------------------------------------- # # SOFTWARE HISTORY # # Date Ticket#", "Section on \"Tkgnats: Task Reporting System\". #------------------------------------------------------------------------- # Additional Information:", "automatically transmit product \"autoSendAddress\": \"000\", #transmission address \"autoStore\": 0, #set", "\" else: if eachHazard['phen'] in ['HU', 'TR', 'TY']: hazardBodyPhrase =", "self._areaName != \"\": self._areaName = \" for \" + self._areaName", "\" has cancelled the \" + hazName + \". \"", "\" Complete\") return fcst def allowedHazards(self): return [] # Added", "= self._easPhrase + '\\n' else: eas = '' s =", "returns paragraphs 2 -> end, etc. # Headlines are always", "length, then #the default text is used. frameit can be", "\"...|* Enter bullet text *|\\n\\n* \" + self._bulletOrder()[i] + \"...\")", "passes the above checks, add the text # print \"hazardBodyText", "editArea = segmentAreas[0] areaLabel = editArea headlines = self.generateProduct(\"Hazards\", argDict,", "pil \"areaName\": \"\", # Name of state, such as \"Georgia\"", "periods, if possible # automatic functions \"autoSend\": 0, #set to", "\"bulletProd\": 0, # do not default to bullets \"hazardSamplingThreshold\": (10,", "\" lastHdln = hdln # # This is for the", "eachHazard not in sortedHazardList: sortedHazardList.append(eachHazard) # # Next, break them", "= '' # # Check that this segment codes to", "Weather Elements Needed: # Hazards #------------------------------------------------------------------------- # Edit Areas Needed:", "a bullet product endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazName = self.hazardName(eachHazard['hdln'],", "headlines editArea = segmentAreas[0] areaLabel = editArea headlines = self.generateProduct(\"Hazards\",", "line feeds removeLF = re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL) bullet = removeLF.sub(r'\\1 \\2',b)", "in the previous product # overview = \"\" for each", "cityDescriptor=self._cityDescriptor, areaList=segmentAreas, includeCities=self._includeCities, includeZoneNames = self._includeZoneNames, accurateCities = self._accurateCities) fcst", "import time, string, types, copy, re import CallToActions import AbsTime", "+ \". 
\" lastHdln = hdln # # This is", "or implied, as to its usefulness for # any purpose.", "method is passed a hazard key, and returns # time", "segmentAreas, expireTime, argDict): # This is the header for an", "# This is for ext hazards # for eachHazard in", "code addition from Middendorf (BYZ) # # Now if there", "\"wfoCityState\": \"<wfoCityState>\", # Location of WFO - city,state \"textdbPil\": \"<textdbPil>\",", "def finalOverviewText(self): #if didn't calculate any, use the default if", "# # Optional Configuration Items # # mapNameForCombinations Name of", "= bullets[x][index+2:] bullets[x] = bullets[x][0:index] #eliminate after bullet text if", "in newBulletList: bulletOrder.remove(bullet) print \"reordered bullets are: \", bulletOrder for", "argDict): argDict[\"language\"] = self._language # Generate Narrative Forecast for Edit", "+ headlines return fcst def _postProcessArea(self, fcst, segmentAreas, argDict): return", "self.hazardName(eachHazard['hdln'], argDict, True) hazardBodyPhrase = hazardBodyPhrase + hazNameA + \\", "= string.split(self._bulletDict().get(eachHazard['phen']),\",\") for newBullet in newBullets: if newBullet not in", "self._includeOverview: overviewBody = \".|*Overview (must edit)*|.\\n\\n\" else: overviewBody = \"\"", "into the text database using the \"textdbPil\" # after product", "for creating Combinations # Can be: # Zones_BOU # FireWxZones_BOU", "time ranges error = self._determineTimeRanges(argDict) if error is not None:", "Enter hazard type *|\" else: forPhrase =\"\" if nwsIntroUsed ==", "in the AWIPS text database. The product is not #", "None, None, None) # find the bullets bullets = []", "hazardBodyPhrase + hazNameACap + \\ \" has also been issued.\"", "in forceCTAList: hazardPhenSig = phen + \".\" + sig cta", "be automatically # sent on the AWIPS WAN to the", "def _postProcessProduct(self, fcst, argDict): # # If an overview exists", "newList.append(eachHazard) elif eachHazard['act'] in ['CAN']: canList.append(eachHazard) elif eachHazard['act'] in ['EXP']:", "'EXP': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Wrap-up text goes", "cancelled the \" + hazName + \". \" else: hazardBodyPhrase", "new hazard and previous segment Text, then # we may", "+ bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is None: print bullet", "= 0 #indicator of multiple sets of bullets for x", "Replaced ellipses with commas in hazardBodyText # ## # This", "= string.split(self._bulletDict().get(eachHazard['phen']),\",\") for canBullet in canBullets: if canBullet not in", "+ \" Complete\") return fcst def allowedHazards(self): return [] #", "the text # print \"hazardBodyText info: incTextFlag: \",incTextFlag if incTextFlag:", "displayName If not None, defines how product appears in GFE", "segmentText = '' # # Check that this segment codes", "['HU', 'TR', 'TY']: hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \\", "# For the values to be considered, the 'hdln' value", "base file that is not intended to be overridden. ##", "in mixed case yet. if para.upper() == cta.upper() and ctaType", "newBullets print \"segment text is: \", segmentText for bullet in", "# # Date Ticket# Engineer Description # ------------ ---------- -----------", "forPhrase = \" for |* Enter hazard type *|\" else:", "combinations. 
## Can be: ## EditAreas_PublicZones_BOU ## EditAreas_FireWx_BOU ## EditAreas_FIPS_BOU", "# print \"hazardBodyText info: incTextFlag: \",incTextFlag if incTextFlag: print \"hazardBodyText", "= string.replace(paragraphs[x],' ','') #make list of call to actions (type,", "the bullets bullets.append(buf[x]) # find only the bulleted text, defined", "= bullets + \"* \" + b.upper() + \"...|* Enter", "as \"FOUS45\" # pil Product pil, such as \"SFTBOS\" #", "based on action # newList = [] canList = []", "ctaParas = self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) ctas", "+ \"= varDict[key]\" self._language = argDict[\"language\"] # Set up information", "in the list, or it needs to be a Statement", "= hazardBodyPhrase + \"\\n\\n\" + \\ segmentText + '\\n\\n' elif", "York\" # wfoCityState City,state that the WFO is located in,", "> self._currentTime and each['act'] not in ['CAN', 'EXP']): overview =", "+ hazNameACap + forPhrase + \\ \" has also been", "# automatically transmitted unless autoSend is 1. # This value", "\" + b.upper() + \"...|* Enter bullet text *|\\n\\n\" hazardBodyPhrase", "default to bullets \"hazardSamplingThreshold\": (10, None), #(%cov, #points) \"callToAction\": 1,", "can/exp hazard and previous segment Text, then # we may", "+ \\ \" has also been issued\" + endTimePhrase +", "for k in d.keys(): func = d[k] items = func()", "text from the last in the series of bullets to", "in acts and \\ (eh['phen'], eh['sig']) not in forceCTAList and", "to automatically store product in textDB \"autoWrite\": 0, #set to", "are in CAN, UPG, EXP only (don't include text) if", "1, then the product will be automatically # written to", "is 0, it # returns the whole thing, if it's", "= None if len(bullets) >= 4: impact = bullets[3] else:", "+ bullets # If segment doesn't pass the checks, put", "store product in textDB \"autoWrite\": 0, #set to 1 to", "re.compile(r'(\\n\\n)\\n*', re.DOTALL) fcst = fixMultiLF.sub(r'\\1', fcst) # finish progress meter", "+ \". \" else: hazardBodyPhrase = hazardBodyPhrase + \"The \"", "# easPhrase Optional EAS phrase to be include in product", "\"DefaultOnly\": textToUse = \"|* \" + textToUse + \" *|\"", "else: expTimeCurrent = argDict['creationTime'] timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent) hazardBodyPhrase =", "\" + self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not None)", "past issuance time for the # expire time. # includeCities", "self.hazardName(eachHazard['hdln'], argDict, False) if eachHazard['endTime'] <= argDict['creationTime']: hazardBodyPhrase = hazardBodyPhrase", "1st set of entries in # a segment, thus double", "re.compile(r'Default overview section', re.DOTALL) fcst = overviewSearch.sub(overview, fcst) # #", "or \"ISC\" # outputFile Defines the output location of the", "foundCTAs def decodeBulletedText(self, prevText): # returns the bullet paragraph text", "the product will be automatically # written to the \"output\"", "= [] canList = [] expList = [] extList =", "incFramingCodes, skipCTAs, forceCTAList = \\ self.useCaptureText(sortedHazardList) # # # Check", "# # This method takes a block of text, wraps", "If an overview exists for this product, insert it #", "in foundSig: foundSig.append(eh['sig']) includeFrameCodes = 0 includeText = 1 skipCTAs", "end, etc. # Headlines are always removed. 
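# [Sketch] The paragraph handling here relies on convertSingleParas(),
# which joins soft-wrapped lines back into one line per paragraph before
# any filtering. Note the pattern as printed in this file reads
# r'(s*[^\n])\n([^\n])', which matches a literal "s"; presumably
# r'(\s*[^\n])\n([^\n])' was intended (the backslash may have been lost
# in extraction). The joining step in isolation (exampleSingleParas is
# illustrative, not part of this file):
import re

def exampleSingleParas(text):
    lf = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
    ptext = lf.sub(r'\1 \2', text)       # join soft-wrapped lines
    ptext = ptext.replace('\n\n', '\n')  # blank-line separators -> '\n'
    return ptext.split('\n')             # one entry per paragraph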
# Framing codes", "very short or blank # if len(segmentText) < 6: incTextFlag", "= bullets[2] else: basis = None if len(bullets) >= 4:", "Migrated A1 OB9.16 code to A2 # 06/17/2015 4027 dgilling", "each.has_key('pil') and each.has_key('endTime') and each.has_key('act')): if (each['pil'] == pil and", "for Edit Area # get the hazards text # We", "for upgrade hazards # for eachHazard in upgList: if len(eachHazard['hdln'])", "paragraphs is 0, it # returns the whole thing, if", "string.split(prevText, '\\n\\n') if len(bullets) <= 1: return prevText ### ###", "the AWIPS WAN to the \"autoSendAddress\" with # the \"awipsWANPil", "build the phrases # nwsIntroUsed = 0 # # This", "if (re.search(\"\\* \" + self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is", "for # any purpose. #------------------------------------------------------------------------- # Standard and Local file", "\"\", fcst) fcst = self._indentBulletText(fcst) # # Clean up multiple", "# This is for the new hazards # phraseCount =", "segments hazardsC = argDict['hazards'] segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable()) if len(segmentList) ==", "if paragraphs is 0, it # returns the whole thing,", "pursuant to Contract DG133W-05-CQ-1067 with the US Government. # #", "length \"purgeTime\": 8, # Maximum hours for expireTime \"includeCities\": 1", "a can/exp hazard and previous segment Text, then # we", "= \".|*Overview (must edit)*|.\\n\\n\" else: overviewBody = \"\" #assemble the", "progress: # # To look up tasks and their status,", "return fcst + \"\\n\\n$$\\n\\n\" def _postProcessProduct(self, fcst, argDict): # #", "None, # Source database for product. Can be \"Official\", \"Fcst\"", "for eachHazard in sortedHazardList: if eachHazard['sig'] in ['S']and eachHazard['phen'] in", "= string.replace(fcst, \"\\n \",\"\\n\") fcst = string.replace(fcst, \"&&\", \"\\n&&\\n\") #", "list and build the phrases # nwsIntroUsed = 0 #", "eachHazard in sortedHazardList: if not eachHazard.has_key('prevText'): newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") print", "only need to get headlines for the first edit area", "# 02/24/2016 5411 randerso Make bullet headers upper case #", "c in ctas: hazardBodyPhrase = hazardBodyPhrase + c + '\\n\\n'", "to single paragraphs paragraphs = self.convertSingleParas(text) for x in xrange(len(paragraphs)):", "in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) ctas = ctao.genericCTAs() for it in", "None) # find the bullets bullets = [] buf =", "error = self._determineTimeRanges(argDict) if error is not None: return error", "been issued.\" else: hazardBodyPhrase = hazardBodyPhrase + hazNameACap + forPhrase", "included in area header \"accurateCities\": 0, # Include all cities", "[] for eachHazard in sortedHazardList: if eachHazard.has_key('prevText'): prevText = eachHazard['prevText']", "+ \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is None: print bullet + \"", "[] for eachHazard in sortedHazardList: if eachHazard['sig'] in ['S']and eachHazard['phen']", "[\"CAN\",\"EXP\"]: canBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for canBullet in canBullets: if canBullet", "in the breakStrings line above is causing issues with ###", "WAN. \"periodCombining\" : 0, # If 1, combine periods, if", "\" has also been issued. This \" + hazName +", "\" for \" + self._areaName issuedByString = self.getIssuedByString() productName =", "is wrapped. 
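# [Sketch] The frameit contract just described, restated as standalone
# branch logic (exampleFrameBullet is illustrative; the real method goes
# on to line-wrap the result with self.indentText()):
def exampleFrameBullet(capText, defaultText, frameit="Never"):
    if capText is not None and len(capText):
        textToUse = capText[0].upper() + capText[1:]
        if frameit == "Always":  # "Always" frames captured text too
            textToUse = "|* " + textToUse + " *|"
    else:
        textToUse = defaultText
        if frameit in ("Always", "DefaultOnly"):  # frame default text
            textToUse = "|* " + textToUse + " *|"
    return "* " + textToUse  # add bullet codes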
if capText is not None and len(capText): textToUse", "to keep separate from CTAs fcst = string.replace(fcst, \\ r\"PRECAUTIONARY/PREPAREDNESS", "the default GUI # entry for storage. # autoSend If", "If 1, cities are determined from grids # citiesPhrase \"Including", "up multiple line feeds # fixMultiLF = re.compile(r'(\\n\\n)\\n*', re.DOTALL) fcst", "the bullets ## bDict = self._bulletDict() ## bLine = bDict.get(eachHazard['phen'])", "output ## for b in bList: ## bullets = bullets", "\"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not None: segmentTextSplit = re.split(\"\\* \"", "### initialize the bullet output ## bullets = \"\" ##", "+ timeWords #add a leading space return timeWords # #", "[\"Winter Weather Advisory\", \"Winter Storm Warning\", \"Beach Hazards Statement\"]: forPhrase", "is templated # includeOverview If 1, the overview section is", "# # defaultEditAreas defines edit areas, default is Combinations #", "\"database\": \"Official\", # Defines output location of finished product. \"outputFile\":", "= string.replace(fcst, \"&&\", \"\\n&&\\n\") # Prevent empty Call to Action", "+ \\ \", which is in effect\" + endTimePhrase +", "+ \\ \" has cancelled the \" + hazName +", "for eachHazard in sortedHazardList: if not eachHazard.has_key('prevText'): newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\")", "bullet text *|\\n\\n* \" + self._bulletOrder()[i] + \"...\") bulletFlag =", "if len(bullets) >= 3: basis = bullets[2] else: basis =", "re.sub(\"\\|\\*.*\\*\\|\",\"\",cta) # We want this comparison to be case-insensitive just", "in foundCTAs. Note # that the formats of these lists", "a bullet product endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'],", "awipsWANPil Defines the awips product identifier # (e.g., KBOUCCFDEN) that", "\"hazardBodyText info: incTextFlag: \",incTextFlag if incTextFlag: print \"hazardBodyText info: segmentText:", "\" + hazName + \\ \" is in effect\" +", "return nothing if prevText is None: return prevText ### ###", "## ## ### loop through the bullets and format the", "\"\\n\" +\\ \"National Weather Service \" + self._wfoCityState + \\", "bullet text *|\\n\\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\") bulletFlag = 0 # # Now", "in xrange(len(bullets)): bullets[x] = string.replace(bullets[x],'\\n',' ') removeLF = re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL)", "for i in range(start,end): if (re.search(\"\\* \" + self._bulletOrder()[i] +", "If 1, zone names will be included in the area", "# need to handle the \"NEW\" entries. else: for eh", "21194 def _bulletDict(self): return [] # Added for DR 21309", "argDict): ## ## ### get the bullet dictionary and split", "statement # something in CANS and something in acts (frame", "combiner # database Source database for product. 
Can be \"Official\",", "import LogStream import TextRules import SampleAnalysis import time, string, types,", "if eachHazard['phen'] in ['HU', 'TR', 'TY']: hazardBodyPhrase += \"In addition,", "ptext = lf.sub(r'\\1 \\2', text) ptext = ptext.replace('\\n\\n', '\\n') paragraphs", "= self.hazardTimePhrases(eachHazard, argDict) hazName = self.hazardName(eachHazard['hdln'], argDict, False) hazardBodyPhrase =", "hazardBodyText(self, hazardList, argDict): bulletProd = self._bulletProd hazardBodyPhrase = '' #", "percentage coverage or number of # grid points in a", "= self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) ctas =", "keepBulletList.append(saveBullet) # Now determine which bullets we have to remove.", "\\ r\"\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n\") fcst = string.replace(fcst, \"\\n \",\"\\n\") fcst =", "to account for framing code issues in CTA cta =", "self.hazardTimePhrases(eachHazard, argDict) hazName = self.hazardName(eachHazard['hdln'], argDict, False) hazardBodyPhrase = hazardBodyPhrase", "_determineTimeRanges(self, argDict): # Set up the time range for 0-240", "eh['sig'] == 'S'): foundCANS = 1 if eh['sig'] not in", "self._areaDictionary, None, cityDescriptor=self._cityDescriptor, areaList=segmentAreas, includeCities=self._includeCities, includeZoneNames = self._includeZoneNames, accurateCities =", "bullets + \"* \" + b + \"...|* Enter bullet", "for creating Hazard Products. #------------------------------------------------------------------------- # Copying: # This software", "= \"|* \" + textToUse + \" *|\" else: textToUse", "the US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA", "#single paragraphs # keep track of any call to actions", "if hazName in [\"Winter Weather Advisory\", \"Winter Storm Warning\", \"Beach", "hazard, argDict, prefixSpace=True): timeWords = self.getTimingPhrase(hazard, argDict['creationTime']) if prefixSpace and", "#text (default or cap) is wrapped in framing codes, or", "as \"Western New York\" # wfoCityState City,state that the WFO", "\"<textdbPil>\", # Product ID for storing to AWIPS text database.", "_preProcessArea(self, fcst, segmentAreas, expireTime, argDict): # This is the header", "self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) ctas = ctao.genericCTAs()", "This value is also used for the default GUI #", "is used to store the product # in the AWIPS", "# Now determine which bullets we have to remove. removeBulletList", "is: \", segmentText for bullet in newBullets: if re.search(\"\\* \"", "= 1 skipCTAs = 0 forceCTAList = [] # all", "time = bullets[1] else: time = None if len(bullets) >=", "# Omaha, NE 68106 # 402.291.0100 # # See the", "else: hazardBodyPhrase += \" has issued \" + hazNameA +", "GENERIC. Uses the CallToAction definitions. #convert text to single paragraphs", "\"includeCities\": 1 , # Cities included in area header \"accurateCities\":", "ctao.ctaDict() for k in d.keys(): func = d[k] items =", "overview header is templated # includeOverview If 1, the overview", "in hazardBodyText # ## # This is a base file", "includeZoneNames If 1, zone names will be included in the", "transmit the # product to the AWIPS WAN. The product", "framing codes, skip CTAs, forceCTAList) # # For the values", "Weather Service\". 
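# [Sketch] The decode walk in isolation: bullet chunks are delimited by
# '\n\n* ', and within a chunk anything past the first blank line is
# peeled off as the afterText (exampleDecodeBullets is illustrative; the
# real method also tracks multipleRecords and rejoins wrapped lines):
def exampleDecodeBullets(prevText):
    if prevText is None:
        return [], ''
    buf = prevText.split('\n\n* ')   # buf[0] holds the headlines
    bullets, afterText = [], ''
    for chunk in buf[1:]:            # hazard, time, basis, impact order
        index = chunk.find('\n\n')
        if index != -1:
            afterText = chunk[index + 2:]  # regular text after bullets
            chunk = chunk[:index]          # eliminate after-bullet text
        bullets.append(chunk)
    return bullets, afterText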
Note # that this only correctly handles the", "self.convertSingleParas(text) for x in xrange(len(paragraphs)): paragraphs[x] = string.replace(paragraphs[x],' ','') #make", "x in xrange(len(paragraphs)): paragraphs[x] = string.replace(paragraphs[x],' ','') #make list of", "0, #set to 1 to automatically transmit product \"autoSendAddress\": \"000\",", "self.hazardName(eachHazard['hdln'], argDict, False) if hazName in [\"Winter Weather Advisory\", \"Winter", "offices that use \"-20 degrees\" in the text. ### outText", "Product pil, such as \"SFTBOS\" # areaName (opt.) Area name", "is Combinations # # purgeTime Maximum number of hours past", "= self.indentText(bullet, indentFirstString = '', indentNextString = ' ', maxWidth=self._lineLength,", "skipCTAs = 0 forceCTAList = [] # all actions are", "\",\"\\n\") fcst = string.replace(fcst, \"&&\", \"\\n&&\\n\") # Prevent empty Call", "modified by Raytheon Company, # pursuant to Contract DG133W-05-CQ-1067 with", "unless autoSend is 1. # This value is also used", "'CAN': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Wrap-up text goes", "to AWIPS text database. \"awipsWANPil\": \"<awipsWANPil>\", # Product ID for", "that use \"-20 degrees\" in the text. ### outText =", "This is the header for an edit area combination areaHeader", "hazard type *|\" else: forPhrase =\"\" if nwsIntroUsed == 0:", "argDict): # This is the header for an edit area", "for |* Enter hazard type *|\" else: forPhrase =\"\" if", "hazardBodyPhrase = \"The National Weather Service in \" +\\ self._wfoCity", "just the default text is wrapped. if capText is not", "# WMO ID \"pil\": \"<pil>\", # Product pil \"areaName\": \"\",", "of multiple sets of bullets for x in xrange(len(buf)): if", "event that there are multiple sets of bullets. In this", "# product identifiers \"productName\": \"Generic Hazard Product\", # product name", "Statement text goes here *|.\\n\\n\" # End code for DR", "cities # includeZoneNames If 1, zone names will be included", "saveBullet in saveBullets: if saveBullet not in keepBulletList: keepBulletList.append(saveBullet) #", "GUI # # You must set the following: # #", "\" will expire \" + timeWords + \". \" #", "= re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL) regText = removeLF.sub(r'\\1 \\2',regText) # extract out", "= '' for eachPara in paras: if paraCount >= paragraphs:", "to 1 to automatically transmit product \"autoSendAddress\": \"000\", #transmission address", "flags=re.IGNORECASE) if len(segmentTextSplit2) == 2: segmentTextSplit[1] = \"PRECAUTIONARY/PREPAREDNESS ACTIONS...\" +", "expList = [] extList = [] conList = [] upgList", "fcst) # # Added to place line feeds in the", "skip phrase endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict,", "used for the default GUI entry for # storage. #", "This is for con hazards # for eachHazard in conList:", "2 segmentText, foundCTAs = self.cleanCapturedText(prevText, startPara, addFramingCodes = False, skipCTAs", "#no defined headline, skip phrase hazName = self.hazardName(eachHazard['hdln'], argDict, False)", "code for DR 21310 # # This adds the call", "<= 1: return prevText ### ### process the text ###", "4027 dgilling Perform case-insensitive # comparisons in foundCTAs. # 07/13/2015", "the overview header is templated # includeOverview If 1, the", "capText variable. 
If capText is None or 0 length, then", "AbsTime.AbsTime(argDict['creationTime']) self._currentTime = argDict['creationTime'] self._expireTime = self._issueTime + self._purgeTime*3600 self._timeLabel", "Note # that this only correctly handles the 1st set", "the text. ### outText = outText + bullet + \"\\n\\n\"", "\"autoStore\": 0, #set to 1 to automatically store product in", "= 50.0 self.setProgressPercentage(50) for segmentAreas in segmentList: self.progressMessage(fraction, percent, \"Making", "print \"* \" + self._bulletOrder()[i] + \"... found!\" segmentTextSplit =", "issued.\" else: hazardBodyPhrase = hazardBodyPhrase + hazNameACap + forPhrase +", "# something in CANS and something in acts (frame it,", "\" has issued \" + \\ hazNameA + \". \"", "will only decode the first set # of bullets and", "this product, insert it # overview = self.finalOverviewText() overviewSearch =", "['CAN']: canList.append(eachHazard) elif eachHazard['act'] in ['EXP']: expList.append(eachHazard) elif eachHazard['act'] in", "background for creating Combinations # Can be: # Zones_BOU #", "(ctaType, cta) in self.__procCTA: ## Added following line to account", "product header # # hazardSamplingThreshold Defines the percentage coverage or", "# # This adds segment text # segmentText = ''", "con hazards # for eachHazard in conList: if len(eachHazard['hdln']) ==", "capture text, inc framing codes, skip CTAs, forceCTAList) # #", "the call to action statements. This is only performed #", "',''))) d = ctao.ctaPilDict() for k in d.keys(): func =", "the AWIPS II Master Rights File (\"Master Rights File.pdf\") for", "tag pairs hazardBodyPhrase = re.sub(r'&&\\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n', \\ \"\", hazardBodyPhrase) return", "2: segmentTextSplit[1] = \"*\" + segmentTextSplit2[1] else: segmentTextSplit2 = re.split(\"PRECAUTIONARY/PREPAREDNESS", "# automatic functions \"autoSend\": 0, #set to 1 to automatically", "conList: if len(eachHazard['hdln']) == 0: continue #no defined headline, skip", "So, if paragraphs is 0, it # returns the whole", "such as \"KSLC\". # wmoID WMO ID code for product", "self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict, True) hazardBodyPhrase = hazardBodyPhrase", "EditAreas_FireWx_BOU ## EditAreas_FIPS_BOU ## EditAreas_MarineZones_BOU \"defaultEditAreas\" : \"EditAreas_PublicZones_<site>_<MultiPil>\", # product", "# pursuant to Contract DG133W-05-CQ-1067 with the US Government. #", "not # automatically stored unless autoStore is 1. This #", "1 skipCTAs = 0 forceCTAList = [] # all actions", "segmentText[0] if tester == '*': startPara = 1 else: startPara", "WFO - city,state \"textdbPil\": \"<textdbPil>\", # Product ID for storing", "areaName (opt.) Area name for product header, such as \"Western", "the cities of\", \"includeZoneNames\":1, # Zone names will be included", "found a bullet if re.match(\"\\*\", b): ### remove line feeds", "exec \"self._\" + key + \"= self._definition[key]\" # Get VariableList", "= self._includeZoneNames, accurateCities = self._accurateCities) fcst = fcst + areaHeader", "get headlines for the first edit area # in the", "with that generated by the headline # algorithms in DiscretePhrases.", "capText, defaultText, frameit=\"Never\"): #returns a properly formatted bulleted text based", "\"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag: print \"*", "II Master Rights File (\"Master Rights File.pdf\") for # further", "effect\" + endTimePhrase + \". 
\" else: if eachHazard['phen'] in", "following line to account for framing code issues in CTA", "text after the bullets. The afterText is text up to", "text ### bullets = [] bullets = string.split(prevText, '\\n\\n') if", "for para in paragraphs: for (ctaType, cta) in self.__procCTA: ##", "Cities included in area header \"accurateCities\": 0, # Include all", "print \"segmentTextSplit is \", segmentTextSplit segmentTextSplit2 = string.split(segmentTextSplit[1],\"*\",1) if len(segmentTextSplit2)", "foundCTAs = [] # Process the paragraphs, keep only the", "forPhrase =\"\" if nwsIntroUsed == 0: hazardBodyPhrase = \"The National", "variable + \"= varDict[key]\" self._language = argDict[\"language\"] # Set up", "5749 randerso Replaced ellipses with commas in hazardBodyText # ##", "storage. # autoSend If set to 1, then the product", "phen/sig based if ent.find('.') == 2: phensig = (ent[0:2], ent[3])", "time range for 0-240 hours self._timeRange = self.createTimeRange(0, 240) self._ddhhmmTime", "expireTime, argDict): # This is the header for an edit", "# Section on \"Tkgnats: Task Reporting System\". #------------------------------------------------------------------------- # Additional", "2: time = bullets[1] else: time = None if len(bullets)", "= re.split(\"\\* \" + self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) segmentText", "None def _preProcessProduct(self, fcst, argDict): # Product header if self._areaName", "hazardBodyText # ## # This is a base file that", "elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']: newList.append(eachHazard) elif eachHazard['act'] in", "in area header \"accurateCities\": 0, # Include all cities in", "NY\" # # Optional Configuration Items # # mapNameForCombinations Name", "phraseCount = 1 if eachHazard['phen'] in ['HU', 'TR', 'TY']: hazardBodyPhrase", "return error # Get the segments hazardsC = argDict['hazards'] segmentList", "eachHazard['act'] == 'EXP': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Wrap-up", "\"\" #assemble the lines overview = overviewHeadline + overviewBody return", "# sortedHazardList = [] for each in ['W', 'Y', 'A',", "through the bullets and format the output ## for b", "(None, None, None, None, None, None) # find the bullets", "forceCTAList) # # For the values to be considered, the", "self.progressMessage(0, 100, self._displayName + \" Complete\") return fcst def allowedHazards(self):", "fraction = fractionOne fcst = self._postProcessProduct(fcst, argDict) return fcst def", ", # Cities included in area header \"accurateCities\": 0, #", "\"NEW\" entries. else: for eh in hazardList: if eh['act'] in", "find only the bulleted text, defined by the double line", "hazNameA + \" has been issued.\" else: hazardBodyPhrase += \"In", "code ## def _getBullets(self, newBulletList, argDict): ## ## ### get", "text # segmentText = '' # # Check that this", "SOFTWARE HISTORY # # Date Ticket# Engineer Description # ------------", "\"*\" + segmentTextSplit2[1] else: segmentTextSplit2 = re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", segmentTextSplit[1], 1,", "# includeOverviewHeadline If 1, the overview header is templated #", "\\2', text) ptext = ptext.replace('\\n\\n', '\\n') paragraphs = ptext.split('\\n') return", "self.__procCTA.append((\"GENERIC\", string.replace(cta,' ',''))) #compare found = [] for para in", "is used. 
frameit can be \"Never\", in which #nothing is", "= self.finalOverviewText() overviewSearch = re.compile(r'Default overview section', re.DOTALL) fcst =", "elif eachHazard['act'] in ['EXT']: extList.append(eachHazard) elif eachHazard['act'] in ['UPG']: upgList.append(eachHazard)", "\"Beach Hazards Statement\"]: forPhrase = \" for |* Enter hazard", "return [] # Added for DR 21309 def _bulletOrder(self): return", "KBOUCCFDEN) that is used to transmit the # product to", "segmentText, flags=re.IGNORECASE) is not None) and bulletFlag: print \"* \"", "keepBulletList: \",keepBulletList print \"hazardBodyText info: removeBulletList: \",removeBulletList # Finally remove", "in removeBulletList: removeBulletList.append(canBullet) print \"hazardBodyText info: keepBulletList: \",keepBulletList print \"hazardBodyText", "\"\\n\" + self._pil + \"\\n\\n\" fcst = fcst + s.upper()", "continue #no defined headline, skip phrase endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)", "sortedHazardList.append(eachHazard) # # Next, break them into individual lists based", "overviewHeadline = \"...|*Overview headline (must edit)*|...\\n\\n\" else: overviewHeadline = \"\"", "on, debug_print statements will appear. # textdbPil Defines the awips", "in # a segment, thus double events will only decode", "# entry for storage. # autoSend If set to 1,", "[] # Process the paragraphs, keep only the interested ones", "The product is not # automatically transmitted unless autoSend is", "automatically write product to file # Area Dictionary -- Descriptive", "in product header \"includeOverviewHeadline\": 1, #include overview header \"includeOverview\": 1,", "in varDict.keys(): if type(key) is types.TupleType: label, variable = key", "self.sentence(hazNameA, addPeriod=False) hazName = self.hazardName(eachHazard['hdln'], argDict, False) if hazName in", "used. frameit can be \"Never\", in which #nothing is wrapped", "for cta in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) ctas = ctao.genericCTAs() for", "or number of # grid points in a zone that", "in bulletOrder: bullets = bullets + \"* \" + b.upper()", "information about zones \"areaDictionary\": \"AreaDictionary\", # Language \"language\": \"english\", \"lineLength\":", "hazName = self.hazardName(eachHazard['hdln'], argDict, False) if hazName in [\"Winter Weather", "is not None: return error # Initialize the output string", "\"smart\", \"displayName\": None, # Source database for product. Can be", "_bulletOrder(self): return [] ## Replaced by 21309 code ## def", "startPara = 0 else: startPara = 1 segmentText, foundCTAs =", "bullets[3] else: impact = None if len(regText) == 0: regText", "bulletFlag: print \"* \" + self._bulletOrder()[i] + \"... 
found!\" segmentTextSplit", "if len(segmentTextSplit2) == 2: segmentTextSplit[1] = \"PRECAUTIONARY/PREPAREDNESS ACTIONS...\" + segmentTextSplit2[1]", "eh['sig'])) return (includeText, includeFrameCodes, skipCTAs, forceCTAList) def cleanCapturedText(self, text, paragraphs,", "for 0-240 hours self._timeRange = self.createTimeRange(0, 240) self._ddhhmmTime = self.getCurrentTime(", "are determined from grids # citiesPhrase \"Including the cities of\"", "# and frame captured text or not # incTextFlag, incFramingCodes,", "staticBulletOrder = self._bulletOrder() for bullet in staticBulletOrder: print \"correct bullet", "all actions are in CAN, UPG, EXP only (don't include", "substituteBulletedText(self, capText, defaultText, frameit=\"Never\"): #returns a properly formatted bulleted text", "#transmission address \"autoStore\": 0, #set to 1 to automatically store", "None: print bullet + \" not in segmentText\" start =", "frame it else: includeText = 0 #end of non statement", "if incTextFlag and bulletProd: for eachHazard in sortedHazardList: if not", "\"\\n\\n\" + \\ segmentText + '\\n\\n' elif bulletProd: bulletFlag =", "If 1, cities will be included in the area header", "= 2 if hdln != lastHdln: if eachHazard['phen'] in ['HU',", "fcst = fcst + s fcst = fcst + \"Default", "been issued.\" else: hazardBodyPhrase += \"In addition, \" + \\", "are different, thus this code # is more complicated for", "# find the bullets bullets = [] buf = prevText.split('\\n\\n*", "Create Combinations file with edit area combinations. ## Can be:", "None) and bulletFlag: print \"* \" + self._bulletOrder()[i] + \"...", "self._areaName) if len(self._easPhrase) != 0: eas = self._easPhrase + '\\n'", "\" else: expTimeCurrent = argDict['creationTime'] timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent) hazardBodyPhrase", "\"ISC\" \"database\": \"Official\", # Defines output location of finished product.", "longer in effect. \" # # This is for con", "use \"cityDescriptor\":\"Including the cities of\", \"includeZoneNames\":1, # Zone names will", "*|\\n\\n* \" + self._bulletOrder()[i] + \"...\") bulletFlag = 0 if", "# export/transfer/disclosure is restricted by U.S. law. 
# Optional Configuration Items
#
#  mapNameForCombinations Name of the map background that is used for
#                         creating/editing the combinations file. This must
#                         be defined or the GFE zone combiner cannot be used.
#  database               Source database for product. Can be "Official",
#                         "Fcst" or "ISC"
#  outputFile             Defines the output location of the finished product.
#                         Product is saved if autoWrite is 1.
#  debug                  If on, debug_print statements will appear.
#  textdbPil              Defines the awips product identifier
#                         (e.g., DENCCFDEN) that is used to store the product
#                         in the AWIPS text database. The product is not
#                         automatically stored unless autoStore is 1. This
#                         value is also used for the default GUI entry for
#                         storage.
#  awipsWANPil            Defines the awips product identifier
#                         (e.g., KBOUCCFDEN) that is used to transmit the
#                         product to the AWIPS WAN. The product is not
#                         automatically transmitted unless autoSend is 1.
#                         This value is also used for the default GUI
#                         entry for storage.
#  autoSend               If set to 1, then the product will be automatically
#                         sent on the AWIPS WAN to the "autoSendAddress".
#  autoStore              If set to 1, then the product will be automatically
#                         stored into the text database using the "textdbPil"
#                         after product creation.
#  autoWrite              If set to 1, then the product will be automatically
#                         written to the "output" named disk file after
#                         product creation.
#
#  lineLength             max length of each line
#
#  defaultEditAreas       defines edit areas, default is Combinations
#
#  purgeTime              Maximum number of hours past issuance time for the
#                         expire time.
#  includeCities          If 1, cities will be included in the area header
#  accurateCities         If 1, cities are determined from grids
#  cityDescriptor         "Including the cities of" phrase used when
#                         including cities
#  includeZoneNames       If 1, zone names will be included in the area header
#  easPhrase              Optional EAS phrase to be include in product header
#
#  hazardSamplingThreshold  Defines the percentage coverage or number of
#                         grid points in a zone that must contain the hazard
#                         in order for it to be considered.
#                         Tuple (percent, points)
#
#  includeOverviewHeadline If 1, the overview header is templated
#  includeOverview        If 1, the overview section is templated
#  bulletProd             If 1, the product will use a bullet format
#-------------------------------------------------------------------------
# Weather Elements Needed:
#     Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
#     Combinations file
#-------------------------------------------------------------------------
# Component Products:
#     Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------

import LogStream
import TextRules
import SampleAnalysis
import time, string, types, copy, re
import CallToActions
import AbsTime

class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,
                  CallToActions.CallToActions):
    Definition = {
        "type": "smart",
        "displayName": None,
        # Source database for product. Can be "Official", "Fcst" or "ISC"
        "database": "Official",
        # Defines output location of finished product.
        "outputFile": "{prddir}/TEXT/genHaz.txt",
        "debug": 0,
        # Name of map background for creating Combinations
        # Can be:
        #   Zones_BOU
        #   FireWxZones_BOU
        #   Counties
        #   Marine_Zones_BOU
        "mapNameForCombinations": "Zones_<site>",

        ## Edit Areas: Create Combinations file with edit area combinations.
        ## Can be:
        ##   EditAreas_PublicZones_BOU
        ##   EditAreas_FireWx_BOU
        ##   EditAreas_FIPS_BOU
        ##   EditAreas_MarineZones_BOU
        "defaultEditAreas": "EditAreas_PublicZones_<site>_<MultiPil>",

        # product identifiers
        "productName": "Generic Hazard Product",  # product name
        "fullStationID": "<fullStationID>",  # full station identifier (4letter)
        "wmoID": "<wmoID>",                  # WMO ID
        "pil": "<pil>",                      # product pil
        "areaName": "",        # Name of state, such as "Georgia" -- optional
        "wfoCityState": "<wfoCityState>",    # Location of WFO - city,state
        "textdbPil": "<textdbPil>",          # Product ID for storing to AWIPS
                                             # text database.
        "awipsWANPil": "<awipsWANPil>",      # Product ID for transmitting to
                                             # AWIPS WAN.

        "periodCombining": 0,       # If 1, combine periods, if possible

        # automatic functions
        "autoSend": 0,              # set to 1 to automatically transmit product
        "autoSendAddress": "000",   # transmission address
        "autoStore": 0,             # set to 1 to automatically store product in textDB
        "autoWrite": 0,             # set to 1 to automatically write product to file

        # Area Dictionary -- Descriptive information about zones
        "areaDictionary": "AreaDictionary",
        # Language
        "language": "english",
        "lineLength": 66,           # Maximum line length

        "purgeTime": 8,             # Maximum hours for expireTime
        "includeCities": 1,         # Cities included in area header
        "accurateCities": 0,        # Include all cities in area header
        "cityLocation": "CityLocation",  # City lat/lon dictionary to use
        "cityDescriptor": "Including the cities of",
        "includeZoneNames": 1,      # Zone names will be included in the area header
        "easPhrase": "",            # Optional EAS phrase to be include in product header
        "includeOverviewHeadline": 1,   # include overview header
        "includeOverview": 1,           # include overview section
        "bulletProd": 0,                # do not default to bullets
        "hazardSamplingThreshold": (10, None),  # (percent, points)
        "callToAction": 1,
        }

    def __init__(self):
        TextRules.TextRules.__init__(self)
        SampleAnalysis.SampleAnalysis.__init__(self)
        self.__overviewText = ""
        self.__procCTA = None
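    ## For illustration only (not part of this baseline file): local hazard
    ## formatters normally inherit from this class rather than edit it,
    ## along these lines (the display name shown is hypothetical):
    ##
    ##   import copy
    ##   import GenericHazards
    ##
    ##   class TextProduct(GenericHazards.TextProduct):
    ##       Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
    ##       Definition["displayName"] = "Hazard_XYZ"
    ##       Definition["bulletProd"] = 1   # switch on the bullet format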
    def generateForecast(self, argDict):
        # Generate Text Phrases for a list of edit areas

        # Get variables
        error = self._getVariables(argDict)
        if error is not None:
            return error

        # Get the segments
        hazardsC = argDict['hazards']
        segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
        if len(segmentList) == 0:
            return "No hazards to report"

        # Determine time ranges
        error = self._determineTimeRanges(argDict)
        if error is not None:
            return error

        # Initialize the output string
        fcst = ""
        fcst = self._preProcessProduct(fcst, argDict)

        # Generate the product for each segment in the segmentList
        fraction = 0
        fractionOne = 1.0 / float(len(segmentList))
        percent = 50.0
        self.setProgressPercentage(percent)
        for segmentAreas in segmentList:
            self.progressMessage(fraction, percent,
                                 "Making Product for Segment")
            fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime,
                                        argDict)
            fcst = self._makeProduct(fcst, segmentAreas, argDict)
            fcst = self._postProcessArea(fcst, segmentAreas, argDict)
            fraction = fractionOne
        fcst = self._postProcessProduct(fcst, argDict)
        return fcst

    def _getVariables(self, argDict):
        # Make argDict accessible
        self.__argDict = argDict

        # Get Definition variables
        self._definition = argDict["forecastDef"]
        for key in self._definition.keys():
            exec "self._" + key + "= self._definition[key]"

        # Get VariableList
        varDict = argDict["varDict"]
        for key in varDict.keys():
            if type(key) is types.TupleType:
                label, variable = key
                exec "self._" + variable + "= varDict[key]"

        self._language = argDict["language"]

        # Set up information for Hazards product
        self._hazards = argDict['hazards']
        self._combinations = argDict["combinations"]
        return None

    def _determineTimeRanges(self, argDict):
        # Set up the time range for 0-240 hours
        self._timeRange = self.createTimeRange(0, 240)
        self._ddhhmmTime = self.getCurrentTime(
            argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
        self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
        self._currentTime = argDict['creationTime']
        self._expireTime = self._issueTime + self._purgeTime*3600
        self._timeLabel = self.getCurrentTime(
            argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
        return None

    def _preProcessProduct(self, fcst, argDict):
        # Product header
        if self._areaName != "":
            self._areaName = " for " + self._areaName
        issuedByString = self.getIssuedByString()
        productName = self.checkTestMode(argDict,
          self._productName + self._areaName)

        if len(self._easPhrase) != 0:
            eas = self._easPhrase + '\n'
        else:
            eas = ''

        s = self._wmoID + " " + self._fullStationID + " " + \
            self._ddhhmmTime + "\n" + self._pil + "\n\n"
        fcst = fcst + s.upper()

        s = eas + productName + "\n" + \
            "National Weather Service " + self._wfoCityState + \
            "\n" + issuedByString + self._timeLabel + "\n\n"
        fcst = fcst + s
        fcst = fcst + "Default overview section\n\n"
        return fcst

    def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
        # This is the header for an edit area combination
        areaHeader = self.makeAreaHeader(
            argDict, "", self._issueTime, expireTime,
            self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
            areaList=segmentAreas, includeCities=self._includeCities,
            includeZoneNames=self._includeZoneNames,
            accurateCities=self._accurateCities)
        fcst = fcst + areaHeader
        return fcst

    def _makeProduct(self, fcst, segmentAreas, argDict):
        argDict["language"] = self._language

        # Generate Narrative Forecast for Edit Area
        # get the hazards text

        # We only need to get headlines for the first edit area
        # in the segment since all areas in the segment have the
        # same headlines
        editArea = segmentAreas[0]
        areaLabel = editArea
        headlines = self.generateProduct("Hazards", argDict,
                                         area=editArea, areaLabel=areaLabel,
                                         timeRange=self._timeRange)
        fcst = fcst + headlines
        return fcst

    def _postProcessArea(self, fcst, segmentAreas, argDict):
        return fcst + "\n\n$$\n\n"

    def _postProcessProduct(self, fcst, argDict):
        #
        # If an overview exists for this product, insert it
        #
        overview = self.finalOverviewText()
        overviewSearch = re.compile(r'Default overview section', re.DOTALL)
        fcst = overviewSearch.sub(overview, fcst)

        #
        # Added to place line feeds in the CAP tags to keep separate from CTAs
        fcst = string.replace(fcst, \
                              r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
                              "\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\n")
        fcst = string.replace(fcst, "\n ", "\n")
        fcst = string.replace(fcst, "&&", "\n&&\n")

        # Prevent empty Call to Action Tags
        fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
                      "", fcst)
        fcst = self._indentBulletText(fcst)

        #
        # Clean up multiple line feeds
        #
        fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
        fcst = fixMultiLF.sub(r'\1', fcst)

        # finish progress meter
        self.setProgressPercentage(100)
        self.progressMessage(0, 100, self._displayName + " Complete")
        return fcst

    def allowedHazards(self):
        return []

    # Added for DR 21194
    def _bulletDict(self):
        return []

    # Added for DR 21309
    def _bulletOrder(self):
        return []
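    ## For illustration only (not part of this baseline file): a bulletProd
    ## site would override the two hooks above so hazardBodyText can build
    ## and order its bullets; the "WS" mapping below is hypothetical.
    ##
    ##   def _bulletDict(self):
    ##       # phenomenon code -> comma-separated bullet names
    ##       return {"WS": "Hazard,Timing,Impacts"}
    ##
    ##   def _bulletOrder(self):
    ##       # master ordering used when inserting missing bullets
    ##       return ["Hazard", "Timing", "Impacts"]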
    #
    # The method hazardBodyText creates an attribution phrase
    #

    def hazardBodyText(self, hazardList, argDict):

        bulletProd = self._bulletProd
        hazardBodyPhrase = ''

        #
        # First, sort the hazards for this segment by importance
        #
        sortedHazardList = []
        for each in ['W', 'Y', 'A', 'O', 'S']:
            for eachHazard in hazardList:
                if eachHazard['sig'] == each:
                    if eachHazard not in sortedHazardList:
                        sortedHazardList.append(eachHazard)

        #
        # Next, break them into individual lists based on action
        #
        newList = []
        canList = []
        expList = []
        extList = []
        conList = []
        upgList = []
        statementList = []

        for eachHazard in sortedHazardList:
            if eachHazard['sig'] in ['S'] and eachHazard['phen'] in ['CF', 'LS']:
                statementList.append(eachHazard)
            elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
                newList.append(eachHazard)
            elif eachHazard['act'] in ['CAN']:
                canList.append(eachHazard)
            elif eachHazard['act'] in ['EXP']:
                expList.append(eachHazard)
            elif eachHazard['act'] in ['EXT']:
                extList.append(eachHazard)
            elif eachHazard['act'] in ['UPG']:
                upgList.append(eachHazard)
            else:
                conList.append(eachHazard)

        #
        # Now, go through each list and build the phrases
        #
        nwsIntroUsed = 0

        #
        # This is for the new hazards
        #
        phraseCount = 0
        lastHdln = None
        for eachHazard in newList:
            hdln = eachHazard['hdln']
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazNameACap = self.sentence(hazNameA, addPeriod=False)
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)

            if hazName in ["Winter Weather Advisory", "Winter Storm Warning",
                           "Beach Hazards Statement"]:
                forPhrase = " for |* Enter hazard type *|"
            else:
                forPhrase = ""

            if nwsIntroUsed == 0:
                hazardBodyPhrase = "The National Weather Service in " + \
                  self._wfoCity
                nwsIntroUsed = 1

            if phraseCount == 0:
                phraseCount = 1
                if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                    hazardBodyPhrase = hazardBodyPhrase + " has issued " + \
                      hazNameA + ". "
                else:
                    hazardBodyPhrase = hazardBodyPhrase + " has issued " + \
                      hazNameA + forPhrase + \
                      ", which is in effect" + endTimePhrase + ". "
            elif phraseCount == 1:
                phraseCount = 2
                if hdln != lastHdln:
                    if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                          " has also been issued."
                    else:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                          forPhrase + " has also been issued. This " + \
                          hazName + " is in effect" + endTimePhrase + ". "
                else:
                    if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                          " has also been issued."
                    else:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                          forPhrase + " has also been issued" + \
                          endTimePhrase + ". "
            else:
                if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                    hazardBodyPhrase += "In addition, " + hazNameA + \
                      " has been issued."
                else:
                    hazardBodyPhrase += "In addition, " + hazNameA + \
                      forPhrase + " has been issued. This " + hazName + \
                      " is in effect" + endTimePhrase + ". "
            lastHdln = hdln

        #
        # This is for the can hazards
        #
        for eachHazard in canList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            if nwsIntroUsed == 0:
                hazardBodyPhrase = "The National Weather Service in " + \
                  self._wfoCity
                nwsIntroUsed = 1
                hazardBodyPhrase = hazardBodyPhrase + \
                  " has cancelled the " + hazName + ". "
            else:
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                  " has been cancelled. "

        #
        # This is for the exp hazards
        #
        phraseCount = 0
        for eachHazard in expList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            if eachHazard['endTime'] <= argDict['creationTime']:
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                  " is no longer in effect. "
            else:
                expTimeCurrent = argDict['creationTime']
                timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                  " will expire " + timeWords + ". "

        #
        # This is for ext hazards
        #
        for eachHazard in extList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
              " is now in effect" + endTimePhrase + ". "

        #
        # This is for upgrade hazards
        #
        for eachHazard in upgList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
              " is no longer in effect. "

        #
        # This is for con hazards
        #
        for eachHazard in conList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
              " remains in effect" + endTimePhrase + ". "

        #
        # This is for statement hazards
        #
        for eachHazard in statementList:
            hazardBodyPhrase = hazardBodyPhrase + \
              "\n\n|* Statement text goes here *|.\n"

        #
        # This adds segment text
        #
        segmentText = ''

        #
        # Check this segment's codes to determine capture or not,
        # and frame captured text or not
        #
        incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
          self.useCaptureText(sortedHazardList)

        #
        # Check that the previous text exists
        #
        foundCTAs = []
        for eachHazard in sortedHazardList:
            if eachHazard.has_key('prevText'):
                prevText = eachHazard['prevText']
                if eachHazard['pil'] == 'MWS':
                    startPara = 0
                else:
                    startPara = 1
                segmentText, foundCTAs = self.cleanCapturedText(prevText,
                  startPara, addFramingCodes=False, skipCTAs=skipCTAs)
                tester = segmentText[0]
                if tester == '*':
                    startPara = 1
                else:
                    startPara = 2
                segmentText, foundCTAs = self.cleanCapturedText(prevText,
                  startPara, addFramingCodes=False, skipCTAs=skipCTAs)

        #
        # Check that the segment text isn't very short or blank
        #
        if len(segmentText) < 6:
            incTextFlag = 0

        # DR 21309 code addition from Middendorf (BYZ)
        #
        # Now if there is a new hazard and previous segment text,
        # we may have to add bullets.
        #
        if incTextFlag and bulletProd:
            for eachHazard in sortedHazardList:
                if not eachHazard.has_key('prevText'):
                    newBulletList = []
                    ### get the default bullets for all hazards from the
                    ### bullet dictionary
                    newBullets = string.split(
                      self._bulletDict().get(eachHazard['phen']), ",")
                    for newBullet in newBullets:
                        if newBullet not in newBulletList:
                            newBulletList.append(newBullet)
                    print "my bullets are: ", newBulletList
                    ### Determine the correct order for all bullets
                    bulletOrder = self._bulletOrder()
                    staticBulletOrder = self._bulletOrder()
                    for bullet in staticBulletOrder:
                        print "correct bullet order should be: ", bulletOrder
                        if bullet not in newBulletList:
                            bulletOrder.remove(bullet)
                    print "reordered bullets are: ", bulletOrder
                    ### Insert any missing bullet ahead of the next bullet
                    ### that is already present in the segment text
                    for bullet in newBulletList:
                        if re.search("\* " + bullet + "\.\.\.", segmentText,
                                     flags=re.IGNORECASE) is None:
                            print bullet + " not in segmentText"
                            start = self._bulletOrder().index(bullet) + 1
                            end = len(self._bulletOrder())
                            bulletFlag = 1
                            for i in range(start, end):
                                if (re.search("\* " + self._bulletOrder()[i] +
                                    "\.\.\.", segmentText,
                                    flags=re.IGNORECASE) is not None) \
                                    and bulletFlag:
                                    print "* " + self._bulletOrder()[i] + \
                                      "... found!"
                                    segmentTextSplit = re.split(
                                      "\* " + self._bulletOrder()[i] + "\.\.\.",
                                      segmentText, flags=re.IGNORECASE)
                                    segmentText = string.join(segmentTextSplit,
                                      "* " + bullet.upper() +
                                      "...|* Enter bullet text *|\n\n* " +
                                      self._bulletOrder()[i] + "...")
                                    bulletFlag = 0
                            if bulletFlag:
                                print "appending to bottom list of bullets!"
                                segmentTextSplit = re.split(
                                  "PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.",
                                  segmentText, flags=re.IGNORECASE)
                                segmentText = "\n" + string.join(
                                  segmentTextSplit,
                                  "* " + bullet.upper() + \
                                  "...|* Enter bullet text *|\n\nPRECAUTIONARY/PREPAREDNESS ACTIONS...")
                                bulletFlag = 0

        #
        # Now if there is a can/exp hazard and previous segment text,
        # we may have to remove bullets.
        #
        if incTextFlag and bulletProd:
            # First make list of bullets that we need to keep.
            keepBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] not in ["CAN", "EXP"]:
                    saveBullets = string.split(
                      self._bulletDict().get(eachHazard['phen']), ",")
                    for saveBullet in saveBullets:
                        if saveBullet not in keepBulletList:
                            keepBulletList.append(saveBullet)
            # Now determine which bullets we have to remove.
            removeBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] in ["CAN", "EXP"]:
                    canBullets = string.split(
                      self._bulletDict().get(eachHazard['phen']), ",")
                    for canBullet in canBullets:
                        if canBullet not in keepBulletList and \
                           canBullet not in removeBulletList:
                            removeBulletList.append(canBullet)
            print "hazardBodyText info: keepBulletList: ", keepBulletList
            print "hazardBodyText info: removeBulletList: ", removeBulletList
            # Finally remove the bullets no longer needed.
            for bullet in removeBulletList:
                if re.search("\* " + bullet + "\.\.\.", segmentText,
                             flags=re.IGNORECASE) is not None:
                    segmentTextSplit = re.split("\* " + bullet + "\.\.\.",
                      segmentText, flags=re.IGNORECASE)
                    print "segmentTextSplit is ", segmentTextSplit
                    segmentTextSplit2 = string.split(segmentTextSplit[1],
                                                     "*", 1)
                    if len(segmentTextSplit2) == 2:
                        segmentTextSplit[1] = "*" + segmentTextSplit2[1]
                    else:
                        segmentTextSplit2 = re.split(
                          "PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.",
                          segmentTextSplit[1], 1, flags=re.IGNORECASE)
                        if len(segmentTextSplit2) == 2:
                            segmentTextSplit[1] = \
                              "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + \
                              segmentTextSplit2[1]
                    segmentText = string.join(segmentTextSplit, "")
            if removeBulletList != []:
                segmentText = "|*\n" + segmentText + "*|"
            else:
                segmentText = segmentText
        # End code for DR 21310

        #
        # If segment passes the above checks, add the text
        #
        print "hazardBodyText info: incTextFlag: ", incTextFlag
        if incTextFlag:
            print "hazardBodyText info: segmentText: ", segmentText
            hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
              segmentText + '\n\n'
        elif bulletProd:
            # note: eachHazard here is the last hazard examined above
            bulletFlag = 0
            if eachHazard['act'] == 'CAN':
                hazardBodyPhrase = hazardBodyPhrase + \
                  "\n\n|* Wrap-up text goes here *|.\n"
            elif eachHazard['act'] == 'EXP':
                hazardBodyPhrase = hazardBodyPhrase + \
                  "\n\n|* Wrap-up text goes here *|.\n"
            else:
                bulletFlag = 1
            ## print "bulletFlag is: ", bulletFlag
            if bulletFlag:
                newBulletList = []
                bullets = ""
                for eachHazard in sortedHazardList:
                    ### get the default bullets for all hazards from the
                    ### bullet dictionary
                    newBullets = string.split(
                      self._bulletDict().get(eachHazard['phen']), ",")
                    print "newBullets = ", newBullets
                    print "segment text is: ", segmentText
                    for newBullet in newBullets:
                        if newBullet not in newBulletList:
                            newBulletList.append(newBullet)
                ### Determine the correct order for all bullets
                bulletOrder = self._bulletOrder()
                staticBulletOrder = self._bulletOrder()
                for bullet in staticBulletOrder:
                    if bullet not in newBulletList:
                        bulletOrder.remove(bullet)
                for b in bulletOrder:
                    bullets = bullets + "* " + b.upper() + \
                      "...|* Enter bullet text *|\n\n"
                hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets

        #
        # This adds the call to action statements. This is only performed
        # if the segment is 'NEW' or if the previous text has been discarded
        # due to a CAN/EXP/UPG segment
        #

        # remove items from forceCTAList if they exist in foundCTAs. Note
        # that the formats of these lists are different, thus this code
        # is more complicated
        for ent in foundCTAs:
            # only process CTAs that are vtec phen/sig based
            if ent.find('.') == 2:
                phensig = (ent[0:2], ent[3])   #phen.sig
                if phensig in forceCTAList:
                    forceCTAList.remove(phensig)

        hazardBodyPhrase = hazardBodyPhrase + '\n\n'
        ctas = []
        for (phen, sig) in forceCTAList:
            hazardPhenSig = phen + "." + sig
            cta = self.defaultCTA(hazardPhenSig)
            if cta not in ctas:
                ctas.append(cta)

        if len(ctas) > 0:
            hazardBodyPhrase = hazardBodyPhrase + \
              'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
            for c in ctas:
                hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
            hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'

        # Make sure there is only one CAP tag pairs
        hazardBodyPhrase = re.sub(
          r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', "",
          hazardBodyPhrase)

        return hazardBodyPhrase
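    ## For illustration only: with a single NEW (non-tropical) hazard the
    ## attribution assembled above reads roughly as follows; the office
    ## name and timing wording shown are hypothetical.
    ##
    ##   The National Weather Service in Denver has issued a Winter Storm
    ##   Warning, which is in effect until 6 PM MST this evening.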
# (hazard,", "of call to actions (type, cta text) if self.__procCTA is", "be considered. Tuple (percent, points) # includeOverviewHeadline If 1, the", "the product # in the AWIPS text database. The product", "the hazard # in order for it to be considered.", "len(eachHazard['hdln']) == 0: continue #no defined headline, skip phrase if", "bullet diction newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for newBullet in newBullets: if", "paragraphs 2 -> end, etc. # Headlines are always removed.", "is causing issues with ### offices that use \"-20 degrees\"", "# Hazards #------------------------------------------------------------------------- # Development tasks that are identified and", "!= -1: regText = bullets[x][index+2:] bullets[x] = bullets[x][0:index] #eliminate after", "handle the \"NEW\" entries. else: for eh in hazardList: if", "startPara = 1 segmentText, foundCTAs = self.cleanCapturedText(prevText, startPara, addFramingCodes =", "if capText is not None and len(capText): textToUse = capText[0].upper()+capText[1:]", "WAN. The product is not # automatically transmitted unless autoSend", "argDict[\"forecastDef\"] for key in self._definition.keys(): exec \"self._\" + key +", "hazard = bullets[0] else: hazard = None if len(bullets) >=", "0: lines = lines[0:x] #eliminate following lines break regText =", "such as \"Western New York\" # wfoCityState City,state that the", "export/transfer/disclosure is restricted by U.S. law. Dissemination # to non-U.S.", "for the # expire time. # includeCities If 1, cities", "default GUI # entry for storage. # autoSend If set", "in order for it to be considered. Tuple (percent, points)", "+ b + \"...|* Enter bullet text *|\\n\\n\" ## #", "outText + b + \"\\n\\n\" ### that's it print outText", "= hazardBodyPhrase + hazNameACap + \\ \" has also been", "we need to keep. keepBulletList = [] for eachHazard in", "issued.\" else: hazardBodyPhrase += \"In addition, \" + \\ hazNameA", "# # For the values to be considered, the 'hdln'", "and in progress: # # To look up tasks and", "If 1, the overview section is templated # bulletProd If", "eh['act'] in acts and \\ (eh['phen'], eh['sig']) not in forceCTAList", "linelength=self._lineLength, breakStr=[\" \", \"-\", \"...\"]) return processedText, foundCTAs def decodeBulletedText(self,", "+ textToUse + \" *|\" # add bullet codes textToUse", "# # hazardSamplingThreshold Defines the percentage coverage or number of", "+ \\ \" is now in effect\" + endTimePhrase +", "0 #indicator of multiple sets of bullets for x in", "bullets we have to remove. removeBulletList = [] for eachHazard", "# # remove items from forceCTAList if they exist in", "the area header # accurateCities If 1, cities are determined", "remove. removeBulletList = [] for eachHazard in sortedHazardList: if eachHazard['act']", "phrase to be include in product header # # hazardSamplingThreshold", "of these lists are different, thus this code # is", "following: # # productName defines name of product e.g. 
\"Zone", "in keepBulletList: keepBulletList.append(saveBullet) # Now determine which bullets we have", "[] bullets = \"\" for eachHazard in sortedHazardList: ### get", "First make list of bullets that we need to keep.", "product endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)", "### Determine the correct order for all bullets bulletOrder =", "header # # hazardSamplingThreshold Defines the percentage coverage or number", "if possible # automatic functions \"autoSend\": 0, #set to 1", "#returns a list of paragraphs based on the input text.", "This software was developed and / or modified by Raytheon", "method hazardBodyText creates an attribution phrase # def hazardBodyText(self, hazardList,", "False, skipCTAs = skipCTAs) tester = segmentText[0] if tester ==", "\"...\"]) def convertSingleParas(self, text): #returns a list of paragraphs based", "== 0: lines = lines[0:x] #eliminate following lines break regText", "= fcst + s fcst = fcst + \"Default overview", "also used for the default GUI # entry for storage.", "argDict['hazards'] self._combinations = argDict[\"combinations\"] return None def _determineTimeRanges(self, argDict): #", "hazardBodyText creates an attribution phrase # def hazardBodyText(self, hazardList, argDict):", "self._wfoCityState + \\ \"\\n\" + issuedByString + self._timeLabel + \"\\n\\n\"", "self.__overviewText = \"\" self.__procCTA = None def generateForecast(self, argDict): #", "product name \"fullStationID\": \"<fullStationID>\", # full station identifier (4letter) \"wmoID\":", "ctasFound(self, text): #returns types of ctas found. The identifier is", "order should be: \", bulletOrder if bullet not in newBulletList:", "argDict, \"\", self._issueTime, expireTime, self._areaDictionary, None, cityDescriptor=self._cityDescriptor, areaList=segmentAreas, includeCities=self._includeCities, includeZoneNames", "2, it returns paragraphs 2 -> end, etc. # Headlines", "each: if eachHazard not in sortedHazardList: sortedHazardList.append(eachHazard) # # Next,", "However we only # want text from the last in", "of the finished product. # Product is saved if autoWrite", "A1 OB9.16 code to A2 # 06/17/2015 4027 dgilling Perform", "self.useCaptureText(sortedHazardList) # # # Check that the previous text exists", "interested in these bullets break # regular text is the", "be \"Official\", # \"Fcst\" or \"ISC\" # outputFile Defines the", "text) ptext = ptext.replace('\\n\\n', '\\n') paragraphs = ptext.split('\\n') return paragraphs", "self._timeRange = self.createTimeRange(0, 240) self._ddhhmmTime = self.getCurrentTime( argDict, \"%d%H%M\", shiftToLocal=0,", "hazNameACap + \\ \" has also been issued. This \"", "segmentTextSplit = re.split(\"\\* \" + self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE)", "the 1st set was captured/decoded. # (hazard, time, basis, impact,", "= None if len(bullets) >= 2: time = bullets[1] else:", "\"textdbPil\" # after product creation. # autoWrite If set to", "\" has also been issued\" + endTimePhrase + \". \"", "here *|.\\n\" else: bulletFlag = 1 ## print \"bulletFlag is:", "each['endTime'] > self._currentTime and each['act'] not in ['CAN', 'EXP']): overview", "no longer in effect. 
\" # # This is for", "the next bullet or up to \"The National Weather Service\".", "to actions (type, cta text) if self.__procCTA is None: self.__procCTA", "eachHazard['act'] in ['NEW', 'EXA', 'EXB']: newList.append(eachHazard) elif eachHazard['act'] in ['CAN']:", "bulleted text, defined by the double line feed term. #", "bullets bulletOrder = self._bulletOrder() staticBulletOrder = self._bulletOrder() for bullet in", "0, #set to 1 to automatically store product in textDB", "def _makeProduct(self, fcst, segmentAreas, argDict): argDict[\"language\"] = self._language # Generate", "= \" \" + timeWords #add a leading space return", "debug_print statements will appear. # textdbPil Defines the awips product", "\". \" lastHdln = hdln # # This is for", "and each['act'] not in ['CAN', 'EXP']): overview = each['prevOverviewText'] self.__overviewText,", "then #the default text is used. frameit can be \"Never\",", "1, } def __init__(self): TextRules.TextRules.__init__(self) SampleAnalysis.SampleAnalysis.__init__(self) self.__overviewText = \"\" self.__procCTA", "frameit == \"Always\": textToUse = \"|* \" + textToUse +", "only decode the first set # of bullets and text.", "'' # # First, sort the hazards for this segment", "# # productName defines name of product e.g. \"Zone Forecast", "is located in, such as \"Buffalo NY\" # # Optional", "# FireWxZones_BOU # Counties # Marine_Zones_BOU \"mapNameForCombinations\": \"Zones_<site>\", ## Edit", "product to the AWIPS WAN. The product is not #", "else: eas = '' s = self._wmoID + \" \"", "hazardBodyPhrase += \"In addition, \" + \\ hazNameA + \"", "using the \"textdbPil\" # after product creation. # autoWrite If", "hazards # for eachHazard in extList: if len(eachHazard['hdln']) == 0:", "product # overview = \"\" for each in hazardList: if", "\\2',b) ### indent code bullet = self.indentText(bullet, indentFirstString = '',", "of non statement # something in CANS and something in", "newList: hdln = eachHazard['hdln'] if len(eachHazard['hdln']) == 0: continue #no", "string.split(self._bulletDict().get(eachHazard['phen']),\",\") print \"newBullets = \", newBullets print \"segment text is:", "Optional EAS phrase to be include in product header \"includeOverviewHeadline\":", "'S' in foundSig and len(foundSig) == 1: #only S includeFrameCodes", "self._definition.keys(): exec \"self._\" + key + \"= self._definition[key]\" # Get", "bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) print \"segmentTextSplit is \", segmentTextSplit", "self.progressMessage(fraction, percent, \"Making Product for Segment\") fcst = self._preProcessArea(fcst, segmentAreas,", "domain, furnished \"as is\", without technical # support, and with", "_makeProduct(self, fcst, segmentAreas, argDict): argDict[\"language\"] = self._language # Generate Narrative", "have to remove bullets. # if incTextFlag and bulletProd: #", "product will be automatically # written to the \"output\" named", "string.replace(cta,' ',''))) #compare found = [] for para in paragraphs:", "= outText + b + \"\\n\\n\" ### that's it print", "continue # No attribution for this case if it is", "# regular text is the remainder of the text. 
However", "textToUse # format it return self.indentText(textToUse, indentFirstString = '', indentNextString", "# of the text regText = \"\" #regular text after", "# Date Ticket# Engineer Description # ------------ ---------- ----------- --------------------------", "if self._includeOverviewHeadline: overviewHeadline = \"...|*Overview headline (must edit)*|...\\n\\n\" else: overviewHeadline", "a Statement (sig=\"S\") cans = ['CAN','UPG','EXP'] acts = ['NEW','EXT','EXA','EXB','CON'] foundACTS", "= hazardBodyPhrase + \\ \"\\n\\n|* Wrap-up text goes here *|.\\n\"", "may have to remove bullets. # if incTextFlag and bulletProd:", "in CTA cta = re.sub(\"\\|\\*.*\\*\\|\",\"\",cta) # We want this comparison", "Additional Information: #------------------------------------------------------------------------- # Example Output: #------------------------------------------------------------------------- import LogStream import", "thus this code # is more complicated for ent in", "eas = self._easPhrase + '\\n' else: eas = '' s", "## ### loop through the bullets and format the output", "time phrase wording consistent with that generated by the headline", "segmentAreas, argDict): argDict[\"language\"] = self._language # Generate Narrative Forecast for", "# incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \\ self.useCaptureText(sortedHazardList) # #", "#if didn't calculate any, use the default if len(self.__overviewText) ==", "\"accurateCities\": 0, # Include all cities in area header \"cityLocation\":", "\"In addition, \" + \\ hazNameA + forPhrase + \"", "and with no warranty, express or implied, as to its", "separate from CTAs fcst = string.replace(fcst, \\ r\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", \\", "4 letter, such as \"KSLC\". # wmoID WMO ID code", "be include in product header # # hazardSamplingThreshold Defines the", "## def _getBullets(self, newBulletList, argDict): ## ## ### get the", "by importance # sortedHazardList = [] for each in ['W',", "self.getTimingPhrase(eachHazard, expTimeCurrent) hazardBodyPhrase = hazardBodyPhrase + \"The \" + hazName", "use the default if len(self.__overviewText) == 0: if self._includeOverviewHeadline: overviewHeadline", "not in keepBulletList: keepBulletList.append(saveBullet) # Now determine which bullets we", "+ \" has been issued.\" else: hazardBodyPhrase += \"In addition,", "fcst + areaHeader return fcst def _makeProduct(self, fcst, segmentAreas, argDict):", "self.defaultCTA(hazardPhenSig) if cta not in ctas: ctas.append(cta) if len(ctas) >", "+ \"\\n\\n\" fcst = fcst + s.upper() s = eas", "product creation. # autoWrite If set to 1, then the", "DG133W-05-CQ-1067 with the US Government. # # U.S. EXPORT CONTROLLED", "d = ctao.ctaDict() for k in d.keys(): func = d[k]", "file. 
This must # be defined or the GFE zone", "'' for eachPara in paras: if paraCount >= paragraphs: found", "0, #set to 1 to automatically write product to file", "+\\ self._wfoCity nwsIntroUsed = 1 hazardBodyPhrase = hazardBodyPhrase + \\", "fcst = self._makeProduct(fcst, segmentAreas, argDict) fcst = self._postProcessArea(fcst, segmentAreas, argDict)", "# 07/15/2016 5749 randerso Replaced ellipses with commas in hazardBodyText", "the 'hdln' value must be # present in the list,", "be case-insensitive just in case # the site is not", "value is also used for the default GUI # entry", "awips product identifier # (e.g., DENCCFDEN) that is used to", "conList = [] upgList = [] statementList = [] for", "# finish progress meter self.setProgressPercentage(100) self.progressMessage(0, 100, self._displayName + \"", "hazardBodyPhrase def finalOverviewText(self): #if didn't calculate any, use the default", "handles the 1st set of entries in # a segment,", "regText = removeLF.sub(r'\\1 \\2',regText) # extract out each section for", "product header, such as \"Western New York\" # wfoCityState City,state", "!= \"\": self._areaName = \" for \" + self._areaName issuedByString", "get the bullet dictionary and split the bullets ## bDict", "segmentTextSplit[1] = \"*\" + segmentTextSplit2[1] else: segmentTextSplit2 = re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\",", "combine periods, if possible # automatic functions \"autoSend\": 0, #set", "of the map background that is used for # creating/editing", "#include overview header \"includeOverview\": 1, #include overview section \"bulletProd\": 0,", "'EXB']: newList.append(eachHazard) elif eachHazard['act'] in ['CAN']: canList.append(eachHazard) elif eachHazard['act'] in", "if frameit == \"Always\" or frameit == \"DefaultOnly\": textToUse =", "in ['NEW'] and len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) return (includeText, includeFrameCodes, skipCTAs,", "Framing codes are added if specified. # paras = self.convertSingleParas(text)", "and len(found): pass else: processedText = processedText + eachPara +", "# Description: This product is a template for creating Hazard", "Weather Service \" + self._wfoCityState + \\ \"\\n\" + issuedByString", "= hazardBodyPhrase + hazNameACap + forPhrase + \\ \" has", "sortedHazardList: if eachHazard.has_key('prevText'): prevText = eachHazard['prevText'] if eachHazard['pil'] == 'MWS':", "*|.\\n\" elif eachHazard['act'] == 'EXP': hazardBodyPhrase = hazardBodyPhrase + \\", "product identifiers \"productName\": \"Generic Hazard Product\", # product name \"fullStationID\":", "len(segmentList) == 0: return \"No hazards to report\" # Determine", "string.replace(fcst, \"\\n \",\"\\n\") fcst = string.replace(fcst, \"&&\", \"\\n&&\\n\") # Prevent", "it is a bullet product hazName = self.hazardName(eachHazard['hdln'], argDict, False)", "if (each.has_key('prevOverviewText') and each.has_key('pil') and each.has_key('endTime') and each.has_key('act')): if (each['pil']", "= False, skipCTAs = skipCTAs) # # Check that the", "Get variables error = self._getVariables(argDict) if error is not None:", "hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Statement text goes here", "clean up the text for x in xrange(len(bullets)): bullets[x] =", "newBullets: if newBullet not in newBulletList: newBulletList.append(newBullet) print \"my bullets", "the output ## for b in bList: ## bullets =", "lines, # then returns the part after 'paragraphs'. 
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
#  displayName      If not None, defines how product appears in GFE GUI
#
#  You must set the following:
#
#    productName      defines name of product e.g. "Zone Forecast Product"
#    fullStationID    Full station identifier, 4 letter, such as "KSLC".
#    wmoID            WMO ID code for product header, such as "FOUS45"
#    pil              Product pil, such as "SFTBOS"
#    areaName (opt.)  Area name for product header, such as
#                     "Western New York"
#    wfoCityState     City,state that the WFO is located in, such as
#                     "Buffalo NY"
#
# Optional Configuration Items
#
#  mapNameForCombinations Name of the map background that is used for
#                         creating/editing the combinations file. This must
#                         be defined or the GFE zone combiner cannot be
#                         used.
#  database               Source database for product. Can be "Official",
#                         "Fcst" or "ISC"
#  outputFile             Defines the output location of the finished
#                         product. Product is saved if autoWrite is 1.
#  debug                  If on, debug_print statements will appear.
#  textdbPil              Defines the awips product identifier
#                         (e.g., DENCCFDEN) that is used to store the
#                         product in the AWIPS text database.
#                         This value is also used for the default GUI
#                         entry for storage.
#  awipsWANPil            Defines the awips product identifier
#                         (e.g., KBOUCCFDEN) that is used to transmit the
#                         product to the AWIPS WAN.
#                         This value is also used for the default GUI
#                         entry for storage.
#  autoSend               If set to 1, then the product will be
#                         automatically sent on the AWIPS WAN to the
#                         "autoSendAddress" with the "awipsWANPil" after
#                         product creation. The product is not
#                         automatically transmitted unless autoSend is 1.
#  autoStore              If set to 1, then the product will be
#                         automatically stored into the text database
#                         using the "textdbPil" after product creation.
#                         The product is not automatically stored unless
#                         autoStore is 1.
#  autoWrite              If set to 1, then the product will be
#                         automatically written to the "output" named
#                         disk file after product creation.
#
#  lineLength             max length of each line
#
#  defaultEditAreas       defines edit areas, default is Combinations
#
#  purgeTime              Maximum number of hours past issuance time for
#                         the expire time.
#  includeCities          If 1, cities will be included in the area header
#  accurateCities         If 1, cities are determined from grids
#  cityDescriptor         "Including the cities of" phrase used when
#                         including cities
#  includeZoneNames       If 1, Zone names will be included in the area
#                         header
#  easPhrase              Optional EAS phrase to be included in product
#                         header
#  includeOverviewHeadline If 1, the overview header is templated
#  includeOverview        If 1, the overview section is templated
#  bulletProd             If 1, the product will use a bullet format
#  hazardSamplingThreshold Defines the percentage coverage or number of
#                          grid points in a zone that must contain the
#                          hazard in order for it to be considered.
#                          Tuple (percent, points)
#-------------------------------------------------------------------------
# Weather Elements Needed:
#     Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Component Products:
#     Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
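#-------------------------------------------------------------------------
# Example (illustrative only -- a hedged sketch, not part of this
# baseline file): a site-level override file would typically import this
# module and replace Definition entries. The product name, pil, and
# identifiers below are hypothetical placeholders.
#
#   import copy
#   import GenericHazards
#
#   class TextProduct(GenericHazards.TextProduct):
#       Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
#       Definition["displayName"]   = "Hazard_XYZ (Example)"
#       Definition["productName"]   = "Example Hazard Product"
#       Definition["fullStationID"] = "KBOU"    # hypothetical station
#       Definition["wmoID"]         = "WWUS45"  # hypothetical WMO ID
#       Definition["pil"]           = "NPWBOU"  # hypothetical pil
#       Definition["bulletProd"]    = 1         # switch on bullet format
#-------------------------------------------------------------------------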
\" else: if", "codes if addFramingCodes: processedText = processedText.rstrip() processedText = \"|*\\n\" +", "variable = key exec \"self._\" + variable + \"= varDict[key]\"", "1: #only S includeFrameCodes = 1 #capture text, but frame", "indentNextString = ' ', maxWidth=self._lineLength, breakStrings=[\" \", \"-\", \"...\"]) def", "EditAreas_PublicZones_BOU ## EditAreas_FireWx_BOU ## EditAreas_FIPS_BOU ## EditAreas_MarineZones_BOU \"defaultEditAreas\" : \"EditAreas_PublicZones_<site>_<MultiPil>\",", "set the following: # # productName defines name of product", "-- optional \"wfoCityState\": \"<wfoCityState>\", # Location of WFO - city,state", "\" has issued \" + hazNameA + forPhrase + \\", "# algorithms in DiscretePhrases. # def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):", "if it's 2, it returns paragraphs 2 -> end, etc.", "1 to automatically write product to file # Area Dictionary", "hazardBodyPhrase = hazardBodyPhrase + \"\\n\\n\" + bullets # If segment", "+ key + \"= self._definition[key]\" # Get VariableList varDict =", "hazard = None if len(bullets) >= 2: time = bullets[1]", "for eachHazard in newList: hdln = eachHazard['hdln'] if len(eachHazard['hdln']) ==", "else: hazardBodyPhrase = hazardBodyPhrase + \"The \" + hazName +", "# wmoID WMO ID code for product header, such as", "address \"autoStore\": 0, #set to 1 to automatically store product", "endTimePhrase + \". \" # # This is for statement", "processedText = '' for eachPara in paras: if paraCount >=", "### get the bullet dictionary and split the bullets ##", "# Initialize the output string fcst = \"\" fcst =", "0: hazardBodyPhrase = hazardBodyPhrase + \\ 'PRECAUTIONARY/PREPAREDNESS ACTIONS...\\n\\n' for c", "+ '&&\\n\\n' # Make sure there is only one CAP", "format #------------------------------------------------------------------------- # Weather Elements Needed: # Hazards #------------------------------------------------------------------------- #", "S includeFrameCodes = 1 #capture text, but frame it else:", "the bullet output ## bullets = \"\" ## ## ###", "the text for x in xrange(len(bullets)): bullets[x] = string.replace(bullets[x],'\\n',' ')", "is now in effect\" + endTimePhrase + \". \" #", "ID for storing to AWIPS text database. \"awipsWANPil\": \"<awipsWANPil>\", #", "segmentTextSplit segmentTextSplit2 = string.split(segmentTextSplit[1],\"*\",1) if len(segmentTextSplit2) == 2: segmentTextSplit[1] =", "for storage. # autoSend If set to 1, then the", "found if skipCTAs and len(found): pass else: processedText = processedText", "### get the default bullets for all hazards from the", "framing codes, or #DefaultOnly\" in which just the default text", "non-U.S. persons whether in the United States or abroad requires", "WFO is located in, such as \"Buffalo NY\" # #", "outText return outText # The _hazardTimePhrases method is passed a", "if removeBulletList != []: segmentText = \"|*\\n\" + segmentText +", "product will be automatically # sent on the AWIPS WAN", "bullets[0:x+1] #only interested in these bullets break # regular text", "# DEFINITION SECTION # # Required Configuration Items: # #", "def _bulletDict(self): return [] # Added for DR 21309 def", "foundSig.append(eh['sig']) includeFrameCodes = 0 includeText = 1 skipCTAs = 0", "Perform case-insensitive # comparisons in foundCTAs. # 07/13/2015 4648 randerso", "US Government. # # U.S. EXPORT CONTROLLED TECHNICAL DATA #", "if frameit == \"Always\": textToUse = \"|* \" + textToUse", "mixed case yet. 
if para.upper() == cta.upper() and ctaType not", "to bottom list of bullets!\" segmentTextSplit = re.split(\"PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\", segmentText,", "on action # newList = [] canList = [] expList", "foundSig: foundSig.append(eh['sig']) includeFrameCodes = 0 includeText = 1 skipCTAs =", "= 1 skipCTAs = 1 for eh in hazardList: if", "is in effect\" + endTimePhrase + \". \" lastHdln =", "re.sub(r'\\nPRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\s*&&\\n', \\ \"\", fcst) fcst = self._indentBulletText(fcst) # #", "a * we found a bullet if re.match(\"\\*\", b): ###", "Definition variables self._definition = argDict[\"forecastDef\"] for key in self._definition.keys(): exec", "# Get VariableList varDict = argDict[\"varDict\"] for key in varDict.keys():", "argDict, False) if hazName in [\"Winter Weather Advisory\", \"Winter Storm", "'TR', 'TY']: hazardBodyPhrase += \"In addition, \" + \\ hazNameA", "## for b in bList: ## bullets = bullets +", "\"...|* Enter bullet text *|\\n\\n\" hazardBodyPhrase = hazardBodyPhrase + \"\\n\\n\"", "hazardBodyPhrase + hazNameACap + forPhrase + \\ \" has also", "1: hazard = bullets[0] else: hazard = None if len(bullets)", "cta) in self.__procCTA: ## Added following line to account for", "in framing codes, \"Always\" in which the #text (default or", "in hazardList: if eachHazard['sig'] == each: if eachHazard not in", "if first character is a * we found a bullet", "\" + bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) print \"segmentTextSplit is", "bullets are present multRecords = 1 bullets = bullets[0:x+1] #only", ">= 3: basis = bullets[2] else: basis = None if", "wording consistent with that generated by the headline # algorithms", "be # present in the list, or it needs to", "text # We only need to get headlines for the", "Language \"language\": \"english\", \"lineLength\": 66, #Maximum line length \"purgeTime\": 8,", "+ self._areaName issuedByString = self.getIssuedByString() productName = self.checkTestMode(argDict, self._productName +", "acts = ['NEW','EXT','EXA','EXB','CON'] foundACTS = 0 foundCANS = 0 foundSig", "\"AreaDictionary\", # Language \"language\": \"english\", \"lineLength\": 66, #Maximum line length", "editArea headlines = self.generateProduct(\"Hazards\", argDict, area = editArea, areaLabel=areaLabel, timeRange", "previous product # overview = \"\" for each in hazardList:", "None, None, None, None) # find the bullets bullets =", "foundCTAs.append(f) if eachPara.find('...') == 0: pass #ignore headlines paraCount =", "in xrange(len(lines)): if lines[x].find('The National Weather Service') == 0: lines", "eachHazard['hdln'] if len(eachHazard['hdln']) == 0: continue #no defined headline, skip", "self._currentTime and each['act'] not in ['CAN', 'EXP']): overview = each['prevOverviewText']", "single paragraphs paragraphs = self.convertSingleParas(text) for x in xrange(len(paragraphs)): paragraphs[x]", "= self._postProcessProduct(fcst, argDict) return fcst def _getVariables(self, argDict): # Make", "# due to a CAN/EXP/UPG segment # # remove items", "# Product pil \"areaName\": \"\", # Name of state, such", "overviewBody = \"\" #assemble the lines overview = overviewHeadline +", "-> end, etc. # Headlines are always removed. # Framing", "SampleAnalysis import time, string, types, copy, re import CallToActions import", "Combinations # Can be: # Zones_BOU # FireWxZones_BOU # Counties", "# Finally remove the bullets no longer needed. 
for bullet", "Dictionary -- Descriptive information about zones \"areaDictionary\": \"AreaDictionary\", # Language", "in effect. \" # # This is for con hazards", "\"%l%M %p %Z %a %b %e %Y\", stripLeading=1) return None", "fullStationID Full station identifier, 4 letter, such as \"KSLC\". #", "CTA ctaParas = self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((\"GENERIC\", string.replace(cta,'", "bullet in removeBulletList: if re.search(\"\\* \"+ bullet + \"\\.\\.\\.\", segmentText,", "This is for the can hazards # for eachHazard in", "\" +\\ self._wfoCity nwsIntroUsed = 1 hazardBodyPhrase = hazardBodyPhrase +", "returns the part after 'paragraphs'. So, if paragraphs is 0,", "wrapped in framing codes, \"Always\" in which the #text (default", ">= 4: impact = bullets[3] else: impact = None if", "percent = 50.0 self.setProgressPercentage(50) for segmentAreas in segmentList: self.progressMessage(fraction, percent,", "in the area header # accurateCities If 1, cities are", "the output location of the finished product. # Product is", "# Source database for product. Can be \"Official\", \"Fcst\" or", "\" + \\ self._ddhhmmTime + \"\\n\" + self._pil + \"\\n\\n\"", "fcst = fcst + s.upper() s = eas + productName", "Weather Service in \" + self._wfoCity nwsIntroUsed = 1 if", "remaining CTAs in processed text for f in found: if", "forPhrase + \" has been issued. This \" + hazName", "if f not in foundCTAs: foundCTAs.append(f) if eachPara.find('...') == 0:", "\"hazardSamplingThreshold\": (10, None), #(%cov, #points) \"callToAction\": 1, } def __init__(self):", "are present multRecords = 1 bullets = bullets[0:x+1] #only interested", "Required Configuration Items: # # displayName If not None, defines", "in the # event that there are multiple sets of", "codes textToUse = \"* \" + textToUse # format it", "\"\" for each in hazardList: if (each.has_key('prevOverviewText') and each.has_key('pil') and", ":\"\", # Optional EAS phrase to be include in product", "goes here *|.\\n\" elif eachHazard['act'] == 'EXP': hazardBodyPhrase = hazardBodyPhrase", "in hazardList: if eh['act'] in acts and (len(eh['hdln']) or eh['sig']", "\"mapNameForCombinations\": \"Zones_<site>\", ## Edit Areas: Create Combinations file with edit", "# # See the AWIPS II Master Rights File (\"Master", "You must set the following: # # productName defines name", "nwsIntroUsed == 0: hazardBodyPhrase = \"The National Weather Service in", "bullets = [] buf = prevText.split('\\n\\n* ') if len(buf) <=", "+ self._bulletOrder()[i] + \"... found!\" segmentTextSplit = re.split(\"\\* \" +", "if len(regText) == 0: regText = None #no regular text", "to keep. keepBulletList = [] for eachHazard in sortedHazardList: if", "= re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL) ptext = lf.sub(r'\\1 \\2', text) ptext =", "such as \"Georgia\" -- optional \"wfoCityState\": \"<wfoCityState>\", # Location of", "text outText = outText + b + \"\\n\\n\" ### that's", "included in the area header # accurateCities If 1, cities", "include in product header \"includeOverviewHeadline\": 1, #include overview header \"includeOverview\":", "upgList = [] statementList = [] for eachHazard in sortedHazardList:", "segment text isn't very short or blank # if len(segmentText)", "etc. # Headlines are always removed. 
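    # A minimal sketch (an assumption for orientation, not baseline code)
    # of what the Definition-to-attribute mirroring in _getVariables below
    # amounts to: each key in the Definition dictionary becomes a
    # "self._<key>" attribute, e.g.:
    #
    #   for key, value in Definition.items():
    #       setattr(self, "_" + key, value)   # self._pil, self._wmoID, ...
    #
    # The baseline uses exec for this, which also handles the
    # (label, variable) tuple keys coming from the VariableList.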
    def generateForecast(self, argDict):
        # Generate Text Phrases for a list of edit areas

        # Get variables
        error = self._getVariables(argDict)
        if error is not None:
            return error

        # Get the segments
        hazardsC = argDict['hazards']
        segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
        if len(segmentList) == 0:
            return "No hazards to report"

        # Determine time ranges
        error = self._determineTimeRanges(argDict)
        if error is not None:
            return error

        # Initialize the output string
        fcst = ""
        fcst = self._preProcessProduct(fcst, argDict)

        # Generate the product for each segment in the segmentList
        fraction = 0
        fractionOne = 1.0/float(len(segmentList))
        percent = 50.0
        self.setProgressPercentage(50)
        for segmentAreas in segmentList:
            self.progressMessage(fraction, percent,
                                 "Making Product for Segment")
            fcst = self._preProcessArea(fcst, segmentAreas,
                                        self._expireTime, argDict)
            fcst = self._makeProduct(fcst, segmentAreas, argDict)
            fcst = self._postProcessArea(fcst, segmentAreas, argDict)
            fraction = fractionOne
        fcst = self._postProcessProduct(fcst, argDict)
        return fcst

    def _getVariables(self, argDict):
        # Make argDict accessible
        self.__argDict = argDict

        # Get Definition variables
        self._definition = argDict["forecastDef"]
        for key in self._definition.keys():
            exec "self._" + key + "= self._definition[key]"

        # Get VariableList
        varDict = argDict["varDict"]
        for key in varDict.keys():
            if type(key) is types.TupleType:
                label, variable = key
                exec "self._" + variable + "= varDict[key]"

        self._language = argDict["language"]

        # Set up information for Hazards product
        self._hazards = argDict['hazards']
        self._combinations = argDict["combinations"]
        return None

    def _determineTimeRanges(self, argDict):
        # Set up the time range for 0-240 hours
        self._timeRange = self.createTimeRange(0, 240)
        self._ddhhmmTime = self.getCurrentTime(
            argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
        self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
        self._currentTime = argDict['creationTime']
        self._expireTime = self._issueTime + self._purgeTime*3600
        self._timeLabel = self.getCurrentTime(
            argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
        return None

    def _preProcessProduct(self, fcst, argDict):
        # Product header
        if self._areaName != "":
            self._areaName = " for " + self._areaName
        issuedByString = self.getIssuedByString()
        productName = self.checkTestMode(argDict,
                self._productName + self._areaName)

        if len(self._easPhrase) != 0:
            eas = self._easPhrase + '\n'
        else:
            eas = ''

        s = self._wmoID + " " + self._fullStationID + " " + \
            self._ddhhmmTime + "\n" + self._pil + "\n\n"
        fcst = fcst + s.upper()

        s = eas + productName + "\n" + \
            "National Weather Service " + self._wfoCityState + \
            "\n" + issuedByString + self._timeLabel + "\n\n"
        fcst = fcst + s
        fcst = fcst + "Default overview section\n"
        return fcst

    def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
        # This is the header for an edit area combination
        areaHeader = self.makeAreaHeader(
            argDict, "", self._issueTime, expireTime,
            self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
            areaList=segmentAreas, includeCities=self._includeCities,
            includeZoneNames=self._includeZoneNames,
            accurateCities=self._accurateCities)
        fcst = fcst + areaHeader
        return fcst

    def _makeProduct(self, fcst, segmentAreas, argDict):
        argDict["language"] = self._language

        # Generate Narrative Forecast for Edit Area
        # We only need to get headlines for the first edit area
        # in the segment since all areas in the segment have
        # the same headlines
        editArea = segmentAreas[0]
        areaLabel = editArea
        headlines = self.generateProduct("Hazards", argDict, area=editArea,
                                         areaLabel=areaLabel,
                                         timeRange=self._timeRange)
        fcst = fcst + headlines
        return fcst

    def _postProcessArea(self, fcst, segmentAreas, argDict):
        return fcst + "\n\n$$\n\n"

    def _postProcessProduct(self, fcst, argDict):
        #
        # If an overview exists for this product, insert it
        #
        overview = self.finalOverviewText()
        overviewSearch = re.compile(r'Default overview section', re.DOTALL)
        fcst = overviewSearch.sub(overview, fcst)

        # Added to place line feeds in the CAP tags to keep separate from CTAs
        fcst = string.replace(fcst, \
                              r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
                              "\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\n")
        fcst = string.replace(fcst, "\n ","\n")
        fcst = string.replace(fcst, "&&", "\n&&\n")

        # Prevent empty Call to Action Tags
        fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
                      "", fcst)

        fcst = self._indentBulletText(fcst)

        #
        # Clean up multiple line feeds
        #
        fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
        fcst = fixMultiLF.sub(r'\1', fcst)

        # finish progress meter
        self.setProgressPercentage(100)
        self.progressMessage(0, 100, self._displayName + " Complete")
        return fcst

    def allowedHazards(self):
        return []
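    # Derived products override allowedHazards() to declare which VTEC
    # phen/sig combinations the product handles. A hedged example,
    # patterned on typical GFE hazard products (the entries and the
    # allActions list below are illustrative, not taken from this file):
    #
    #   def allowedHazards(self):
    #       allActions = ["NEW", "EXA", "EXB", "EXT", "CAN", "CON", "EXP"]
    #       return [
    #           ('WS.W', allActions, 'WinterWx'),  # Winter Storm Warning
    #           ('WW.Y', allActions, 'WinterWx'),  # Winter Weather Advisory
    #           ]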
    #
    # The hazardTimePhrases method is passed a hazard key, and returns
    # time phrase wording consistent with that generated by the headline
    # algorithms in DiscretePhrases.
    #
    def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):
        timeWords = self.getTimingPhrase(hazard, argDict['creationTime'])
        if prefixSpace and len(timeWords):
            timeWords = " " + timeWords   #add a leading space
        return timeWords

    #
    # The method hazardBodyText creates an attribution phrase
    #
    def hazardBodyText(self, hazardList, argDict):

        bulletProd = self._bulletProd
        hazardBodyPhrase = ''

        #
        # First, sort the hazards for this segment by importance
        #
        sortedHazardList = []
        for each in ['W', 'Y', 'A', 'O', 'S']:
            for eachHazard in hazardList:
                if eachHazard['sig'] == each:
                    if eachHazard not in sortedHazardList:
                        sortedHazardList.append(eachHazard)

        #
        # Next, break them into individual lists based on action
        #
        newList = []
        canList = []
        expList = []
        extList = []
        upgList = []
        conList = []
        statementList = []

        for eachHazard in sortedHazardList:
            if eachHazard['sig'] in ['S'] and eachHazard['phen'] in ['CF', 'LS']:
                statementList.append(eachHazard)
            elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
                newList.append(eachHazard)
            elif eachHazard['act'] in ['CAN']:
                canList.append(eachHazard)
            elif eachHazard['act'] in ['EXP']:
                expList.append(eachHazard)
            elif eachHazard['act'] in ['EXT']:
                extList.append(eachHazard)
            elif eachHazard['act'] in ['UPG']:
                upgList.append(eachHazard)
            else:
                conList.append(eachHazard)

        #
        # Now, go through each list and build the phrases
        #
        nwsIntroUsed = 0

        #
        # This is for the new hazards
        #
        phraseCount = 0
        lastHdln = None
        for eachHazard in newList:
            hdln = eachHazard['hdln']
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazNameACap = self.sentence(hazNameA, addPeriod=False)
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)

            if hazName in ["Winter Weather Advisory", "Winter Storm Warning",
                           "Beach Hazards Statement"]:
                forPhrase = " for |* Enter hazard type *|"
            else:
                forPhrase = ""

            if nwsIntroUsed == 0:
                hazardBodyPhrase = "The National Weather Service in " + \
                    self._wfoCity
                nwsIntroUsed = 1
            if phraseCount == 0:
                phraseCount = 1
                if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                    hazardBodyPhrase += " has issued " + hazNameA + \
                        forPhrase + ". "
                else:
                    hazardBodyPhrase = hazardBodyPhrase + \
                        " has issued " + hazNameA + forPhrase + \
                        ", which is in effect" + endTimePhrase + ". "
            elif phraseCount == 1:
                phraseCount = 2
                if hdln != lastHdln:
                    if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            forPhrase + " has also been issued. "
                    else:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            forPhrase + " has also been issued. " + \
                            "This " + hazName + " is in effect" + \
                            endTimePhrase + ". "
                else:
                    if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            forPhrase + " has also been issued. "
                    else:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            forPhrase + \
                            " has also been issued" + endTimePhrase + ". "
            else:
                if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                    hazardBodyPhrase += "In addition, " + \
                        hazNameA + " has been issued."
                else:
                    hazardBodyPhrase += "In addition, " + \
                        hazNameA + forPhrase + " has been issued. This " + \
                        hazName + " is in effect" + endTimePhrase + ". "
            lastHdln = hdln

        #
        # This is for the can hazards
        #
        for eachHazard in canList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            if nwsIntroUsed == 0:
                hazardBodyPhrase = "The National Weather Service in " +\
                    self._wfoCity
                nwsIntroUsed = 1
                hazardBodyPhrase = hazardBodyPhrase + \
                    " has cancelled the " + hazName + ". "
            else:
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                    " has been cancelled. "

        #
        # This is for the exp hazards
        #
        for eachHazard in expList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            if eachHazard['endTime'] <= argDict['creationTime']:
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                    " is no longer in effect. "
            else:
                expTimeCurrent = argDict['creationTime']
                timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                    " will expire " + timeWords + ". "

        #
        # This is for ext hazards
        #
        for eachHazard in extList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
                " is now in effect" + endTimePhrase + ". "

        #
        # This is for con hazards
        #
        for eachHazard in conList:
            if len(eachHazard['hdln']) == 0:
                continue   #no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
                " remains in effect" + endTimePhrase + ". "

        #
        # This is for statement hazards
        #
        for eachHazard in statementList:
            hazardBodyPhrase = "...|* Add statement headline *|...\n\n"

        #
        # This adds segment text
        #
        segmentText = ''

        #
        # Check this segment's codes to determine capture or not,
        # and frame captured text or not
        #
        incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
            self.useCaptureText(sortedHazardList)

        #
        # Check if previous text exists
        #
        foundCTAs = []
        for eachHazard in sortedHazardList:
            if eachHazard.has_key('prevText'):
                prevText = eachHazard['prevText']
                if eachHazard['pil'] == 'MWS':
                    startPara = 0
                else:
                    startPara = 1
                segmentText, foundCTAs = self.cleanCapturedText(prevText,
                    startPara, addFramingCodes=False, skipCTAs=skipCTAs)
                tester = segmentText[0]
                if tester == '*':
                    startPara = 1
                else:
                    startPara = 2
                segmentText, foundCTAs = self.cleanCapturedText(prevText,
                    startPara, addFramingCodes=False, skipCTAs=skipCTAs)

        #
        # Check that the segment text isn't very short or blank
        #
        if len(segmentText) < 6:
            incTextFlag = 0

        # DR 21309 code addition from Middendorf (BYZ)
        #
        # Now if there is a new hazard and previous segment text, then
        # we may have to add bullets.
        #
        if incTextFlag and bulletProd:
            bulletFlag = 0
            for eachHazard in sortedHazardList:
                if eachHazard['act'] in ['NEW'] and \
                   not eachHazard.has_key('prevText'):
                    bulletFlag = 1
            ## print "bulletFlag is: ",bulletFlag
            if bulletFlag:
                newBulletList = []
                for eachHazard in sortedHazardList:
                    ### get the default bullets for all hazards from the bullet dictionary
                    newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
                    for newBullet in newBullets:
                        if newBullet not in newBulletList:
                            newBulletList.append(newBullet)
                print "my bullets are: ", newBulletList
                print "segment text is: ", segmentText
                for bullet in newBulletList:
                    if re.search("\* " + bullet + "\.\.\.", segmentText,
                                 flags=re.IGNORECASE) is not None:
                        continue
                    ### bullet is missing: insert it ahead of the next bullet
                    ### that is present, otherwise append it to the bottom
                    print "bullet " + bullet + " not in segmentText"
                    start = self._bulletOrder().index(bullet) + 1
                    end = len(self._bulletOrder())
                    bulletFlag = 1
                    for i in range(start, end):
                        if (re.search("\* " + self._bulletOrder()[i] + "\.\.\.",
                                      segmentText, flags=re.IGNORECASE) is not None):
                            print "* " + self._bulletOrder()[i] + "... found!"
                            segmentTextSplit = re.split(
                                "\* " + self._bulletOrder()[i] + "\.\.\.",
                                segmentText, flags=re.IGNORECASE)
                            segmentText = string.join(segmentTextSplit,
                                "* " + bullet.upper() +
                                "...|* Enter bullet text *|\n\n* " +
                                self._bulletOrder()[i] + "...")
                            bulletFlag = 0
                            break
                    if bulletFlag:
                        print "appending to bottom list of bullets!"
                        segmentTextSplit = re.split(
                            "PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.",
                            segmentText)
                        segmentText = string.join(segmentTextSplit,
                            "* " + bullet.upper() +
                            "...|* Enter bullet text *|\n\n" +
                            "PRECAUTIONARY/PREPAREDNESS ACTIONS...")
                        bulletFlag = 0

        #
        # Now if there is a can/exp hazard and previous segment text, then
        # we may have to remove bullets.
        #
        if incTextFlag and bulletProd:
            # make list of bullets that we need to keep.
            keepBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] not in ["CAN","EXP"]:
                    saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
                    for saveBullet in saveBullets:
                        if saveBullet not in keepBulletList:
                            keepBulletList.append(saveBullet)
            # Now determine which bullets we have to remove.
            removeBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] in ["CAN","EXP"]:
                    canBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
                    for canBullet in canBullets:
                        if canBullet not in keepBulletList and \
                           canBullet not in removeBulletList:
                            removeBulletList.append(canBullet)
            print "hazardBodyText info: keepBulletList: ",keepBulletList
            print "hazardBodyText info: removeBulletList: ",removeBulletList
            # Finally remove the bullets no longer needed.
            for bullet in removeBulletList:
                if re.search("\* "+ bullet + "\.\.\.", segmentText,
                             flags=re.IGNORECASE) is not None:
                    segmentTextSplit = re.split("\* " + bullet + "\.\.\.",
                        segmentText, flags=re.IGNORECASE)
                    print "segmentTextSplit is ", segmentTextSplit
                    segmentTextSplit2 = string.split(segmentTextSplit[1],"*",1)
                    if len(segmentTextSplit2) == 2:
                        segmentTextSplit[1] = "*" + segmentTextSplit2[1]
                    else:
                        segmentTextSplit2 = re.split(
                            "PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.",
                            segmentTextSplit[1], 1)
                        if len(segmentTextSplit2) == 2:
                            segmentTextSplit[1] = \
                                "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + \
                                segmentTextSplit2[1]
                    segmentText = string.join(segmentTextSplit,"")
            if removeBulletList != []:
                segmentText = "|*\n" + segmentText + "*|"
            else:
                segmentText = segmentText

        #
        # If segment passes the above checks, add the text
        #
        print "hazardBodyText info: incTextFlag: ", incTextFlag
        if incTextFlag:
            hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
                segmentText + '\n\n'

        #
        # If segment doesn't pass the checks, put in framing codes
        #
        elif bulletProd:
            bulletFlag = 0
            if eachHazard['act'] == 'CAN':
                hazardBodyPhrase = hazardBodyPhrase + \
                    "\n\n|* Wrap-up text goes here *|.\n"
            elif eachHazard['act'] == 'EXP':
                hazardBodyPhrase = hazardBodyPhrase + \
                    "\n\n|* Wrap-up text goes here *|.\n"
            else:
                bulletFlag = 1
            if bulletFlag:
                newBulletList = []
                bullets = ""
                for eachHazard in sortedHazardList:
                    ### get the default bullets for all hazards from the bullet dictionary
                    newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
                    for newBullet in newBullets:
                        if newBullet not in newBulletList:
                            newBulletList.append(newBullet)
                ### Determine the correct order for all bullets
                bulletOrder = self._bulletOrder()
                staticBulletOrder = self._bulletOrder()
                for bullet in staticBulletOrder:
                    print "bullet order should be: ", bulletOrder
                    if bullet not in newBulletList:
                        bulletOrder.remove(bullet)
                print "reordered bullets are: ", bulletOrder
                for b in bulletOrder:
                    bullets = bullets + "* " + b + \
                        "...|* Enter bullet text *|\n\n"
                hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets

        # Statement hazards get a framed statement block instead
        else:
            hazardBodyPhrase = hazardBodyPhrase + \
                "\n\n|* Statement text goes here *|.\n"

        #
        # This adds the call to action statements. This is only performed
        # if the segment is 'NEW' or if the previous text has been
        # discarded due to a CAN/EXP/UPG segment
        #

        # remove items from forceCTAList if they exist in foundCTAs. Note
        # that the formats of these lists are different, thus this code
        # is more complicated
        for ent in foundCTAs:
            # only process CTAs that are vtec phen/sig based
            if ent.find('.') == 2:
                phensig = (ent[0:2], ent[3])   #phen.sig
                if phensig in forceCTAList:
                    del forceCTAList[forceCTAList.index(phensig)]

        hazardBodyPhrase = hazardBodyPhrase + '\n\n'
        ctas = []
        for (phen,sig) in forceCTAList:
            hazardPhenSig = phen + "." + sig
            cta = self.defaultCTA(hazardPhenSig)
            if cta not in ctas:
                ctas.append(cta)

        if len(ctas) > 0:
            hazardBodyPhrase = hazardBodyPhrase + \
                'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
            for c in ctas:
                hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
            hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'

        # Make sure there is only one CAP tag pairs
        hazardBodyPhrase = re.sub(r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', \
                                  "", hazardBodyPhrase)

        return hazardBodyPhrase

    def finalOverviewText(self):
        #if didn't calculate any, use the default
        if len(self.__overviewText) == 0:

            if self._includeOverviewHeadline:
                overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
            else:
                overviewHeadline = ""

            if self._includeOverview:
                overviewBody = ".|*Overview (must edit)*|.\n\n"
            else:
                overviewBody = ""

            #assemble the lines
            overview = overviewHeadline + overviewBody
            return overview

        else:
            return self.__overviewText

    def overviewText(self, hazardList, pil):
        #
        # This method finds an overview in the previous product
        #
        overview = ""
        for each in hazardList:
            if (each.has_key('prevOverviewText') and
                each.has_key('pil') and
                each.has_key('endTime') and
                each.has_key('act')):
                if (each['pil'] == pil and
                    each['endTime'] > self._currentTime and
                    each['act'] not in ['CAN', 'EXP']):
                    overview = each['prevOverviewText']
                    self.__overviewText, dummy = self.cleanCapturedText(
                        overview, 0)
        return
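    # For orientation, a NEW hazard typically yields an attribution of
    # the form below (illustrative output only; the exact wording is
    # assembled piecewise by hazardBodyText above, and the office, hazard,
    # and time phrase are hypothetical):
    #
    #   The National Weather Service in Boulder has issued a Winter Storm
    #   Warning, which is in effect until 6 PM MST this evening.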
\" # # This is for", "def substituteBulletedText(self, capText, defaultText, frameit=\"Never\"): #returns a properly formatted bulleted", "eh['sig']) not in forceCTAList and \\ len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) #everything", "['UPG']: upgList.append(eachHazard) else: conList.append(eachHazard) # # Now, go through each", "# # This is for the new hazards # phraseCount", "A2 # 06/17/2015 4027 dgilling Perform case-insensitive # comparisons in", "+ \\ \" will expire \" + timeWords + \".", "hazardList: if eh['act'] in ['NEW'] and len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) return", "the segments hazardsC = argDict['hazards'] segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable()) if len(segmentList)", "exec \"self._\" + variable + \"= varDict[key]\" self._language = argDict[\"language\"]", "product e.g. \"Zone Forecast Product\" # fullStationID Full station identifier,", "self.checkTestMode(argDict, self._productName + self._areaName) if len(self._easPhrase) != 0: eas =", "code bullet = self.indentText(bullet, indentFirstString = '', indentNextString = '", "text) if self.__procCTA is None: self.__procCTA = [] ctao =", "return error # Initialize the output string fcst = \"\"", "hazNameA = self.hazardName(eachHazard['hdln'], argDict, True) hazardBodyPhrase = hazardBodyPhrase + hazNameA", "else: includeText = 0 #end of non statement # something", "\"Fcst\" or \"ISC\" # outputFile Defines the output location of", "# full station identifier (4letter) \"wmoID\": \"<wmoID>\", # WMO ID", "argDict): # Generate Text Phrases for a list of edit", "'' # # Check that this segment codes to determine", "\"Making Product for Segment\") fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime, argDict)", "law. Dissemination # to non-U.S. persons whether in the United", "\". \" elif phraseCount == 1: phraseCount = 2 if", "of text, wraps it preserving blank lines, # then returns", "\"The National Weather Service\". Note # that this only correctly", "3: basis = bullets[2] else: basis = None if len(bullets)", "on the input text. 
lf = re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL) ptext =", "with commas in hazardBodyText # ## # This is a", "Location of WFO - city,state \"textdbPil\": \"<textdbPil>\", # Product ID", "bulletProd: bulletFlag = 0 if eachHazard['act'] == 'CAN': hazardBodyPhrase =", "\\ self._ddhhmmTime + \"\\n\" + self._pil + \"\\n\\n\" fcst =", "\\ \", which is in effect\" + endTimePhrase + \".", "2: segmentTextSplit[1] = \"PRECAUTIONARY/PREPAREDNESS ACTIONS...\" + segmentTextSplit2[1] segmentText = string.join(segmentTextSplit,\"\")", "for framing code issues in CTA cta = re.sub(\"\\|\\*.*\\*\\|\",\"\",cta) #", "(re.search(\"\\* \" + self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not", "in ['CAN', 'EXP']): overview = each['prevOverviewText'] self.__overviewText, dummy = self.cleanCapturedText(", "to be case-insensitive just in case # the site is", "in ['EXT']: extList.append(eachHazard) elif eachHazard['act'] in ['UPG']: upgList.append(eachHazard) else: conList.append(eachHazard)", "line feeds # fixMultiLF = re.compile(r'(\\n\\n)\\n*', re.DOTALL) fcst = fixMultiLF.sub(r'\\1',", "= skipCTAs) # # Check that the segment text isn't", "for a list of edit areas # Get variables error", "with no warranty, express or implied, as to its usefulness", "areaLabel = editArea headlines = self.generateProduct(\"Hazards\", argDict, area = editArea,", "not in newBulletList: bulletOrder.remove(bullet) print \"reordered bullets are: \", bulletOrder", "segment Text, then # we may have to add bullets.", "varDict[key]\" self._language = argDict[\"language\"] # Set up information for Hazards", "to the \"autoSendAddress\" with # the \"awipsWANPil after product creation.", "argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict, True) hazardBodyPhrase = hazardBodyPhrase +", "= re.compile(r'Default overview section', re.DOTALL) fcst = overviewSearch.sub(overview, fcst) #", "one CAP tag pairs hazardBodyPhrase = re.sub(r'&&\\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\\.\\.\\.\\n', \\ \"\",", "len(ctas) > 0: hazardBodyPhrase = hazardBodyPhrase + \\ 'PRECAUTIONARY/PREPAREDNESS ACTIONS...\\n\\n'", "hazardBodyPhrase + \" has issued \" + \\ hazNameA +", "segment doesn't pass the checks, put in framing codes else:", "bullets for all hazards from the bullet diction newBullets =", "len(bullets) >= 4: impact = bullets[3] else: impact = None", "up to \"The National Weather Service\". 
Note # that this", "4027 randerso Migrated A1 OB9.16 code to A2 # 06/17/2015", "must # be defined or the GFE zone combiner #", "zones \"areaDictionary\": \"AreaDictionary\", # Language \"language\": \"english\", \"lineLength\": 66, #Maximum", "= self.hazardName(eachHazard['hdln'], argDict, False) hazardBodyPhrase = hazardBodyPhrase + \"The \"", "in sortedHazardList: if eachHazard.has_key('prevText'): prevText = eachHazard['prevText'] if eachHazard['pil'] ==", "0 #end of non statement # something in CANS and", "if len(segmentText) < 6: incTextFlag = 0 # DR 21309", "for DR 21309 def _bulletOrder(self): return [] ## Replaced by", "names will be included in the area header # easPhrase", "overviewBody = \".|*Overview (must edit)*|.\\n\\n\" else: overviewBody = \"\" #assemble", "continue #no defined headline, skip phrase if self._bulletProd: continue #", "len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) #everything in active entries, captured text is", "up the text for x in xrange(len(bullets)): bullets[x] = string.replace(bullets[x],'\\n','", "+ \"\\n\" ## return bullets def _indentBulletText(self, prevText): print prevText", "segment since all areas in the segment have # the", "exists for this product, insert it # overview = self.finalOverviewText()", "\"autoWrite\": 0, #set to 1 to automatically write product to", "0 lastHdln = None for eachHazard in newList: hdln =", "method finds an overview in the previous product # overview", "each.has_key('act')): if (each['pil'] == pil and each['endTime'] > self._currentTime and", "argDict): # Product header if self._areaName != \"\": self._areaName =", "0: continue #no defined headline, skip phrase hazName = self.hazardName(eachHazard['hdln'],", "Counties # Marine_Zones_BOU \"mapNameForCombinations\": \"Zones_<site>\", ## Edit Areas: Create Combinations", "\"000\", #transmission address \"autoStore\": 0, #set to 1 to automatically", "segment in the segmentList fraction = 0 fractionOne = 1.0/float(len(segmentList))", "def hazardBodyText(self, hazardList, argDict): bulletProd = self._bulletProd hazardBodyPhrase = ''", "skipCTAs = skipCTAs) # # Check that the segment text", "Needed: # Hazards #------------------------------------------------------------------------- # Edit Areas Needed: None #-------------------------------------------------------------------------", "string.split(self._bulletDict().get(eachHazard['phen']),\",\") for saveBullet in saveBullets: if saveBullet not in keepBulletList:", "'\\n\\n' elif bulletProd: bulletFlag = 0 if eachHazard['act'] == 'CAN':", "%Y\", stripLeading=1) return None def _preProcessProduct(self, fcst, argDict): # Product", "cans = ['CAN','UPG','EXP'] acts = ['NEW','EXT','EXA','EXB','CON'] foundACTS = 0 foundCANS", "first set # of bullets and text. The multipleRecords is", "bullet product endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict,", "for this segment by importance # sortedHazardList = [] for", "the segmentList fraction = 0 fractionOne = 1.0/float(len(segmentList)) percent =", "phrase if self._bulletProd: continue # No attribution for this case", "if (each['pil'] == pil and each['endTime'] > self._currentTime and each['act']", "Can be: ## EditAreas_PublicZones_BOU ## EditAreas_FireWx_BOU ## EditAreas_FIPS_BOU ## EditAreas_MarineZones_BOU", "abroad requires # an export license or other authorization. 
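    # A small worked example of decodeBulletedText (hypothetical input;
    # the return tuple is positional, as documented above, so the first
    # four bullets land in hazard/time/basis/impact regardless of their
    # WHAT/WHERE/WHEN/IMPACTS labels):
    #
    #   prev = ("...HEADLINE...\n\n"
    #           "* WHAT...Heavy snow.\n\n"
    #           "* WHERE...The example zones.\n\n"
    #           "* WHEN...Until 6 PM.\n\n"
    #           "* IMPACTS...Difficult travel.\n\n"
    #           "Additional details.\n")
    #   hazard, time, basis, impact, regText, mult = \
    #       self.decodeBulletedText(prev)
    #   # hazard  -> "WHAT...Heavy snow."
    #   # regText -> "Additional details.\n"
    #   # mult    -> 0  (only one set of bullets)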
    # Added for DR 21309
    def _bulletDict(self):
        return []

    # Added for DR 21309
    def _bulletOrder(self):
        return []

    ## Replaced by 21309 code
    ## def _getBullets(self, newBulletList, argDict):
    ##
    ##     ### get the bullet dictionary and split the bullets
    ##     bDict = self._bulletDict()
    ##     bLine = bDict.get(eachHazard['phen'])
    ##     print 20*"*" + str(bLine)
    ##     bList = newBulletList.split(",")
    ##
    ##     ### initialize the bullet output
    ##     bullets = ""
    ##
    ##     ### loop through the bullets and format the output
    ##     for b in bList:
    ##         bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
    ##     # bullets = bullets + "\n"
    ##     return bullets

    def _indentBulletText(self, prevText):

        print prevText

        ### if our text is empty, return nothing
        if prevText is None:
            return prevText

        ###
        ### split the text
        ###
        bullets = []
        bullets = string.split(prevText, '\n\n')
        if len(bullets) <= 1:
            return prevText

        ###
        ### process the text
        ###
        outText = ""
        for b in bullets:
            ### if first character is a * we found a bullet
            if re.match("\*", b):
                ### remove line feeds
                removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
                bullet = removeLF.sub(r'\1 \2',b)
                ### indent code
                bullet = self.indentText(bullet, indentFirstString = '',
                    indentNextString = '  ', maxWidth=self._lineLength,
                    breakStrings=[" ", "..."])
                ### The "-" in the breakStrings line above is causing issues with
                ### offices that use "-20 degrees" in the text.
                outText = outText + bullet + "\n\n"
            else:   ### not a bullet, CTA text
                outText = outText + b + "\n\n"
        ### that's it
        print outText
        return outText
#-------------------------------------------------------------------------
# Description: This product is a template for creating Hazard Products.
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
#   GenericHazards
#-------------------------------------------------------------------------
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
#  displayName      If not None, defines how product appears in GFE GUI
#
#  You must set the following:
#
#  productName      defines name of product e.g. "Zone Forecast Product"
#  fullStationID    Full station identifier, 4 letter, such as "KSLC".
#  wmoID            WMO ID code for product header, such as "FOUS45"
#  pil              Product pil, such as "SFTBOS"
#  areaName (opt.)  Area name for product header, such as "Western New York"
#  wfoCityState     WFO location, such as "Buffalo NY"
#
# Optional Configuration Items
#
#  mapNameForCombinations Name of the map background that is used for
#                         creating/editing the combinations file. This must
#                         be defined or the GFE zone combiner will not be
#                         usable for this product.
#  database               Source database for product. Can be "Official",
#                         "Fcst" or "ISC"
#  outputFile             Defines the output location of the finished product.
#                         Product is saved if autoWrite is 1.
#  debug                  If on, debug_print statements will appear.
#  textdbPil              Defines the awips product identifier
#                         (e.g., DENCCFDEN) that is used to store the product
#                         in the AWIPS text database.
#                         The product is not automatically stored unless
#                         autoStore is 1.  This value is also used for the
#                         default GUI entry for storage.
#  awipsWANPil            Defines the awips product identifier
#                         (e.g., KBOUCCFDEN) that is used to transmit the
#                         product to the AWIPS WAN.
#                         The product is not automatically transmitted unless
#                         autoSend is 1.  This value is also used for the
#                         default GUI entry for storage.
#  periodCombining        If 1, combine periods, if possible
#
#  automatic functions:
#  autoSend               If set to 1, then the product will be automatically
#                         sent on the AWIPS WAN to the "autoSendAddress" with
#                         the "awipsWANPil after product creation.
#  autoStore              If set to 1, then the product will be automatically
#                         stored into the text database using the "textdbPil"
#                         after product creation.
#  autoWrite              If set to 1, then the product will be automatically
#                         written to the "output" named disk file after
#                         product creation.
#
#  lineLength             max length of each line
#
#  defaultEditAreas       defines edit areas, default is Combinations
#
#  purgeTime              Maximum number of hours from issuance time for the
#                         expire time.
#  includeCities          If 1, cities will be included in the area header
#  accurateCities         If 1, cities are determined from grids
#  citiesPhrase           "Including the cities of" phrase used when including
#                         cities
#  includeZoneNames       If 1, zone names will be included in the area header
#  easPhrase              Optional EAS phrase to be included in product header
#  hazardSamplingThreshold Defines the percentage coverage or number of
#                         grid points in a zone that must contain the hazard
#                         in order for it to be considered. Tuple (percent, points)
#  includeOverviewHeadline If 1, the overview section is templated
#  bulletProd             If 1, the product is in bullet format
#
#-------------------------------------------------------------------------
# Weather Elements Needed:
#    Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
#    Combinations file
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
\"awipsWANPil\": \"<awipsWANPil>\", # Product ID for transmitting to AWIPS", "if len(self.__overviewText) == 0: if self._includeOverviewHeadline: overviewHeadline = \"...|*Overview headline", "self.hazardName(eachHazard['hdln'], argDict, True) hazNameACap = self.sentence(hazNameA, addPeriod=False) hazName = self.hazardName(eachHazard['hdln'],", "\"as is\", without technical # support, and with no warranty,", "hazNameA + \". \" else: hazardBodyPhrase += \" has issued", "\\ \" is no longer in effect. \" # #", "self.indentText(bullet, indentFirstString = '', indentNextString = ' ', maxWidth=self._lineLength, breakStrings=[\"", "for x in xrange(len(bullets)): bullets[x] = string.replace(bullets[x],'\\n',' ') removeLF =", "set to 1, then the product will be automatically #", "creation. # autoStore If set to 1, then the product", "[] # Added for DR 21194 def _bulletDict(self): return []", "= 0 # DR 21309 code addition from Middendorf (BYZ)", "#------------------------------------------------------------------------- # Weather Elements Needed: # Hazards #------------------------------------------------------------------------- # Edit", "prevText = eachHazard['prevText'] if eachHazard['pil'] == 'MWS': startPara = 0", "text) if foundCANS and not foundACTS: if 'S' in foundSig", "and \\ len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) #everything in active entries, captured", "are always removed. # Framing codes are added if specified.", "Service in \" +\\ self._wfoCity nwsIntroUsed = 1 hazardBodyPhrase =", "outText + bullet + \"\\n\\n\" else: ### not a bullet,", "\"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is None: print bullet + \" not", "[] expList = [] extList = [] conList = []", "issued. This \" + hazName + forPhrase + \\ \"", "lineLength max length of each line # # defaultEditAreas defines", "not None) and bulletFlag: print \"* \" + self._bulletOrder()[i] +", "\"* \" + self._bulletOrder()[i] + \"... found!\" segmentTextSplit = re.split(\"\\*", "be included in the area header # easPhrase Optional EAS", "= '' # # First, sort the hazards for this", "= self._bulletOrder() for bullet in staticBulletOrder: print \"correct bullet order", "## EditAreas_MarineZones_BOU \"defaultEditAreas\" : \"EditAreas_PublicZones_<site>_<MultiPil>\", # product identifiers \"productName\": \"Generic", "segmentText + \"*|\" else: segmentText = segmentText # # If", "finds an overview in the previous product # overview =", "need to handle the \"NEW\" entries. 
    # Added for DR 21194
    def _bulletDict(self):
        return []

    # Added for DR 21309
    def _bulletOrder(self):
        return []

    ## Replaced by 21309 code
    ## def _getBullets(self, newBulletList, argDict):
    ##
    ##     # Get the bullet dictionary and split the bullets
    ##     bDict = self._bulletDict()
    ##     bLine = bDict.get(eachHazard['phen'])
    ##     print 20* "*" + (eachHazard['phen'])
    ##     bList = newBulletList.split(",")
    ##
    ##     # Piece together the bullets
    ##     bullets = ""
    ##     for b in bList:
    ##         bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
    ##     # bullets = bullets + "\n"
    ##     return bullets

    def _indentBulletText(self, prevText):
        print prevText
        ### if previous text is empty, return nothing
        if prevText is None:
            return prevText
        ###
        ### split the text
        ###
        bullets = prevText.split('\n\n')
        if len(bullets) <= 1:
            return prevText
        ###
        ### process the text
        ###
        outText = ""
        for b in bullets:
            ### if first character is a * we found a bullet
            if re.match("\*", b):
                ### remove line feeds
                removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
                bullet = removeLF.sub(r'\1 \2', b)
                ### indent code
                bullet = self.indentText(bullet, indentFirstString = '',
                                         indentNextString = '  ',
                                         maxWidth=self._lineLength,
                                         breakStrings=[" ", "..."])
                ###
                ### the "-" in the breakStrings line above is causing issues
                ### with offices that use "-20 degrees" in the text.
                ###
                outText = outText + bullet + "\n\n"
            else:   ### not a bullet, CTA text
                outText = outText + b + "\n\n"
        ### that's it
        return outText
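    ## Illustrative sketch only: a local override of the two DR 21194/21309
    ## hooks above.  hazardBodyText calls self._bulletDict().get(phen) and
    ## splits the result on commas, so overrides return a dictionary keyed by
    ## phen code; the phen code and bullet names here are assumed examples,
    ## not baseline values:
    ##
    ##   def _bulletDict(self):
    ##       return {"WS": "Hazard,Timing,Impacts"}
    ##
    ##   def _bulletOrder(self):
    ##       return ["Hazard", "Timing", "Impacts"]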
\" else: if eachHazard['phen'] in ['HU', 'TR',", "eachHazard.has_key('prevText'): newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") print \"newBullets = \", newBullets print", "will be automatically # sent on the AWIPS WAN to", "regText = None #no regular text after bullets return (hazard,", "#eliminate after bullet text if len(bullets) > x+2: #more bullets", "the series of bullets to the # beginning of any", "segmentAreas, argDict) fraction = fractionOne fcst = self._postProcessProduct(fcst, argDict) return", "This is for ext hazards # for eachHazard in extList:", "#Maximum line length \"purgeTime\": 8, # Maximum hours for expireTime", "+ hazName + \\ \" has been cancelled. \" #", "+ \" not in segmentText\" start = self._bulletOrder().index(bullet) + 1", "removeBulletList.append(canBullet) print \"hazardBodyText info: keepBulletList: \",keepBulletList print \"hazardBodyText info: removeBulletList:", "text): #returns types of ctas found. The identifier is the", "= \"\" #regular text after bullets for x in xrange(1,", "processedText + \"*|\\n\" # Wrap processedText = self.endline(processedText, linelength=self._lineLength, breakStr=[\"", "ACTIONS...\\n\\n' for c in ctas: hazardBodyPhrase = hazardBodyPhrase + c", "has also been issued. This \" + hazName + forPhrase", "\"autoSendAddress\" with # the \"awipsWANPil after product creation. # autoStore", "self._easPhrase + '\\n' else: eas = '' s = self._wmoID", "automatically store product in textDB \"autoWrite\": 0, #set to 1", "is the header for an edit area combination areaHeader =", "effect. \" else: expTimeCurrent = argDict['creationTime'] timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)", "## print 20* \"*\" + (eachHazard['phen']) ## bList = newBulletList.split(\",\")", "AWIPS WAN. The product is not # automatically transmitted unless", "self._bulletDict() ## bLine = bDict.get(eachHazard['phen']) ## print 20* \"*\" +", "len(eh['hdln']): forceCTAList.append((eh['phen'], eh['sig'])) return (includeText, includeFrameCodes, skipCTAs, forceCTAList) def cleanCapturedText(self,", "0, # If 1, combine periods, if possible # automatic", "bullets: ### if first character is a * we found", "from the bullet diction newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for newBullet in", "Service in \" + self._wfoCity nwsIntroUsed = 1 if phraseCount", "area combination areaHeader = self.makeAreaHeader( argDict, \"\", self._issueTime, expireTime, self._areaDictionary,", "endTimePhrase + \". 
\" # # This is for upgrade", "[] upgList = [] statementList = [] for eachHazard in", "f not in foundCTAs: foundCTAs.append(f) if eachPara.find('...') == 0: pass", "return prevText ### ### split the text ### bullets =", "bulletOrder: bullets = bullets + \"* \" + b.upper() +", "if eachHazard['act'] not in [\"CAN\",\"EXP\"]: saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") for saveBullet", "1 end = len(self._bulletOrder()) bulletFlag = 1 for i in", "If capText is None or 0 length, then #the default", "for eh in hazardList: if eh['act'] in ['NEW'] and len(eh['hdln']):", "product header \"includeOverviewHeadline\": 1, #include overview header \"includeOverview\": 1, #include", "if not eachHazard.has_key('prevText'): newBullets = string.split(self._bulletDict().get(eachHazard['phen']),\",\") print \"newBullets = \",", "None: self.__procCTA = [] ctao = CallToActions.CallToActions() d = ctao.ctaDict()", "Weather Service in \" +\\ self._wfoCity nwsIntroUsed = 1 hazardBodyPhrase", "addFramingCodes = False, skipCTAs = skipCTAs) tester = segmentText[0] if", "indicating: # (inc capture text, inc framing codes, skip CTAs,", "+ '\\n\\n' hazardBodyPhrase = hazardBodyPhrase + '&&\\n\\n' # Make sure", "include text) elif foundCANS and foundACTS: includeFrameCodes = 1 skipCTAs", "calculate any, use the default if len(self.__overviewText) == 0: if", "the previous product # overview = \"\" for each in", "# Product header if self._areaName != \"\": self._areaName = \"", "WMO ID \"pil\": \"<pil>\", # Product pil \"areaName\": \"\", #", "that the formats of these lists are different, thus this", "text based on #the capText variable. If capText is None", "# Added for DR 21194 def _bulletDict(self): return [] #", "Enter bullet text *|\\n\\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\") bulletFlag = 0 # #", "for this case if it is a bullet product hazName", "if specified. # paras = self.convertSingleParas(text) #single paragraphs # keep", "has been issued.\" else: hazardBodyPhrase += \"In addition, \" +", "skipCTAs = 1 for eh in hazardList: if eh['act'] in", "Products. #------------------------------------------------------------------------- # Copying: # This software is in the", "fcst def _getVariables(self, argDict): # Make argDict accessible self.__argDict =", "f in found: if f not in foundCTAs: foundCTAs.append(f) if", "foundACTS = 0 foundCANS = 0 foundSig = [] for", "whether in the United States or abroad requires # an", "= editArea headlines = self.generateProduct(\"Hazards\", argDict, area = editArea, areaLabel=areaLabel,", "if it is a bullet product endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)", "\"...|* Enter bullet text *|\\n\\n\" ## # bullets = bullets", "for the default GUI entry for # storage. # awipsWANPil", "timeWords = \" \" + timeWords #add a leading space", "# if incTextFlag and bulletProd: # First make list of", "# Include all cities in area header \"cityLocation\": \"CityLocation\", #", "\", which is in effect\" + endTimePhrase + \". 
\"", "= [] upgList = [] statementList = [] for eachHazard", "return \"No hazards to report\" # Determine time ranges error", "the map background that is used for # creating/editing the", "hours self._timeRange = self.createTimeRange(0, 240) self._ddhhmmTime = self.getCurrentTime( argDict, \"%d%H%M\",", "if previous text is empty, return nothing if prevText is", "#only process CTAs that are vtec phen/sig based if ent.find('.')", "for eachHazard in extList: if len(eachHazard['hdln']) == 0: continue #no", "self.__procCTA is None: self.__procCTA = [] ctao = CallToActions.CallToActions() d", "in the area header # easPhrase Optional EAS phrase to", "'' s = self._wmoID + \" \" + self._fullStationID +", "= each['prevOverviewText'] self.__overviewText, dummy = self.cleanCapturedText( overview, 0) break def", "class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis, CallToActions.CallToActions): Definition = { \"type\": \"smart\", \"displayName\":", "self._timeLabel + \"\\n\\n\" fcst = fcst + s fcst =", "on #the capText variable. If capText is None or 0", "\\ \" has also been issued. This \" + hazName", "return fcst def _getVariables(self, argDict): # Make argDict accessible self.__argDict", "Fix bullets in follow up products # 02/24/2016 5411 randerso", "mapNameForCombinations Name of the map background that is used for", "bullet format #------------------------------------------------------------------------- # Weather Elements Needed: # Hazards #-------------------------------------------------------------------------", "\"\", self._issueTime, expireTime, self._areaDictionary, None, cityDescriptor=self._cityDescriptor, areaList=segmentAreas, includeCities=self._includeCities, includeZoneNames =", "processedText.rstrip() processedText = \"|*\\n\" + processedText + \"*|\\n\" # Wrap", "returns the whole thing, if it's 2, it returns paragraphs", "be included in the area header # accurateCities If 1,", "[] for each in ['W', 'Y', 'A', 'O', 'S']: for", "case if it is a bullet product hazName = self.hazardName(eachHazard['hdln'],", "hazName + forPhrase + \\ \" is in effect\" +", "hazardList): #Based on the hazardlist, returns a tuple indicating: #", "+= \" has issued \" + hazNameA + forPhrase +", "','') #make list of call to actions (type, cta text)", "in xrange(1, len(bullets)): index = bullets[x].find('\\n\\n') if index != -1:", "CAN, UPG, EXP only (don't include text) if foundCANS and", "has issued \" + hazNameA + forPhrase + \\ \",", "# Added to place line feeds in the CAP tags", "\"\\n\" ## return bullets def _indentBulletText(self, prevText): print prevText ###", "\\ \" has been cancelled. \" # # This is", "processedText = processedText.rstrip() processedText = \"|*\\n\" + processedText + \"*|\\n\"", "for transmitting to AWIPS WAN. \"periodCombining\" : 0, # If", "['W', 'Y', 'A', 'O', 'S']: for eachHazard in hazardList: if", "argDict): # # If an overview exists for this product,", "possible # automatic functions \"autoSend\": 0, #set to 1 to", "+ hazName + \\ \" will expire \" + timeWords", "0, # Include all cities in area header \"cityLocation\": \"CityLocation\",", "# productName defines name of product e.g. 
\"Zone Forecast Product\"", "#------------------------------------------------------------------------- # Example Output: #------------------------------------------------------------------------- import LogStream import TextRules import", "', maxWidth=self._lineLength, breakStrings=[\" \", \"...\"]) ### ### the \"-\" in", "be: # Zones_BOU # FireWxZones_BOU # Counties # Marine_Zones_BOU \"mapNameForCombinations\":", "hazardBodyPhrase = \"...|* Add statement headline *|...\\n\\n\" # # This", "import TextRules import SampleAnalysis import time, string, types, copy, re", "= lines[0:x] #eliminate following lines break regText = (\"\\n\").join(lines) #", "header \"includeOverviewHeadline\": 1, #include overview header \"includeOverview\": 1, #include overview", "# defaultEditAreas defines edit areas, default is Combinations # #", "# The _hazardTimePhrases method is passed a hazard key, and", "(hazard, time, basis, impact, afterText, multipleRecords) if prevText is None:", "+ textToUse # format it return self.indentText(textToUse, indentFirstString = '',", "= re.compile(r'(\\n\\n)\\n*', re.DOTALL) fcst = fixMultiLF.sub(r'\\1', fcst) # finish progress", "of bullets. In this case # only the 1st set", "the \"textdbPil\" # after product creation. # autoWrite If set", "product to file # Area Dictionary -- Descriptive information about", "time. # includeCities If 1, cities will be included in", "# Get variables error = self._getVariables(argDict) if error is not", "paragraphs based on the input text. lf = re.compile(r'(s*[^\\n])\\n([^\\n])', re.DOTALL)", "self._bulletOrder()[i] + \"...\") bulletFlag = 0 if bulletFlag: print \"appending", "importance # sortedHazardList = [] for each in ['W', 'Y',", "= 0 # # Now if there is a can/exp", "# all actions are in CAN, UPG, EXP only (don't", "have to remove. 
removeBulletList = [] for eachHazard in sortedHazardList:", "paragraphs: found = self.ctasFound(eachPara) #get list of ctas found if", "= \"|*\\n\" + processedText + \"*|\\n\" # Wrap processedText =", "CTA text outText = outText + b + \"\\n\\n\" ###", "segmentAreas in segmentList: self.progressMessage(fraction, percent, \"Making Product for Segment\") fcst", "hazNameACap + \\ \" has also been issued.\" else: hazardBodyPhrase", "text is empty, return nothing if prevText is None: return", "bullets[1] else: time = None if len(bullets) >= 3: basis", "#returns a properly formatted bulleted text based on #the capText", "place line feeds in the CAP tags to keep separate", "each in hazardList: if (each.has_key('prevOverviewText') and each.has_key('pil') and each.has_key('endTime') and", "\" *|\" # add bullet codes textToUse = \"* \"", "#get second string which is the CTA ctaParas = self.convertSingleParas(it)", "False) if hazName in [\"Winter Weather Advisory\", \"Winter Storm Warning\",", "not in ctas: ctas.append(cta) if len(ctas) > 0: hazardBodyPhrase =", "bullets = bullets + \"\\n\" ## return bullets def _indentBulletText(self,", "= \"\" fcst = self._preProcessProduct(fcst, argDict) # Generate the product", "= argDict[\"combinations\"] return None def _determineTimeRanges(self, argDict): # Set up", "time = None if len(bullets) >= 3: basis = bullets[2]", "label, variable = key exec \"self._\" + variable + \"=", "= 0 #end of non statement # something in CANS", "self._language # Generate Narrative Forecast for Edit Area # get", "# Add framing codes if addFramingCodes: processedText = processedText.rstrip() processedText", "framing codes else: hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Statement", "for segmentAreas in segmentList: self.progressMessage(fraction, percent, \"Making Product for Segment\")", "or eh['sig'] == 'S'): foundACTS = 1 if eh['act'] in", "capText is not None and len(capText): textToUse = capText[0].upper()+capText[1:] if", "key in varDict.keys(): if type(key) is types.TupleType: label, variable =", "bullet if re.match(\"\\*\", b): ### remove line feeds removeLF =", "+ endTimePhrase + \". \" lastHdln = hdln # #", "[] for para in paragraphs: for (ctaType, cta) in self.__procCTA:", "This is for statement hazards # for eachHazard in statementList:", "to 1 to automatically store product in textDB \"autoWrite\": 0,", "\" + self._bulletOrder()[i] + \"... found!\" segmentTextSplit = re.split(\"\\* \"", "a block of text, wraps it preserving blank lines, #", "is the remainder of the text. However we only #", "has also been issued\" + endTimePhrase + \". \" else:", "# No attribution for this case if it is a", "stripLeading=0) self._issueTime = AbsTime.AbsTime(argDict['creationTime']) self._currentTime = argDict['creationTime'] self._expireTime = self._issueTime", "= self._language # Generate Narrative Forecast for Edit Area #", "been cancelled. \" # # This is for the exp", "+ \"The \" + hazName + \\ \" will expire", "string.split(self._bulletDict().get(eachHazard['phen']),\",\") for canBullet in canBullets: if canBullet not in keepBulletList", "\" has been issued. 
This \" + hazName + \\", "the correct order for all bullets bulletOrder = self._bulletOrder() staticBulletOrder", "error is not None: return error # Get the segments", "useCaptureText(self, hazardList): #Based on the hazardlist, returns a tuple indicating:", "# mapNameForCombinations Name of the map background that is used", "= 1 for i in range(start,end): if (re.search(\"\\* \" +", "Product User Guide # Section on \"Tkgnats: Task Reporting System\".", "the # product to the AWIPS WAN. The product is", "\" else: hazardBodyPhrase = hazardBodyPhrase + \"The \" + hazName", "re.search(\"\\* \" + bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is None:", "AWIPS WAN. \"periodCombining\" : 0, # If 1, combine periods,", "file that is not intended to be overridden. ## #-------------------------------------------------------------------------", "issued \" + hazNameA + forPhrase + \\ \", which", "if len(bullets) >= 4: impact = bullets[3] else: impact =", "eachHazard['pil'] == 'MWS': startPara = 0 else: startPara = 1", "+ \"\\n\\n\" fcst = fcst + s fcst = fcst", "== 2: segmentTextSplit[1] = \"PRECAUTIONARY/PREPAREDNESS ACTIONS...\" + segmentTextSplit2[1] segmentText =", "tasks and their status, see the Text Product User Guide", "keepBulletList = [] for eachHazard in sortedHazardList: if eachHazard['act'] not", "The identifier is the pil (e.g., ZFP), #phen/sig (e.g., DU.Y),", "\"Winter Storm Warning\", \"Beach Hazards Statement\"]: forPhrase = \" for", "_bulletDict(self): return [] # Added for DR 21309 def _bulletOrder(self):", "argDict, \"%d%H%M\", shiftToLocal=0, stripLeading=0) self._issueTime = AbsTime.AbsTime(argDict['creationTime']) self._currentTime = argDict['creationTime']", "= [] # Process the paragraphs, keep only the interested", "series of bullets to the # beginning of any next", "self._issueTime, expireTime, self._areaDictionary, None, cityDescriptor=self._cityDescriptor, areaList=segmentAreas, includeCities=self._includeCities, includeZoneNames = self._includeZoneNames,", "False) if nwsIntroUsed == 0: hazardBodyPhrase = \"The National Weather", "# for eachHazard in extList: if len(eachHazard['hdln']) == 0: continue", "if bulletFlag: print \"appending to bottom list of bullets!\" segmentTextSplit", "codes, \"Always\" in which the #text (default or cap) is", "timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent) hazardBodyPhrase = hazardBodyPhrase + \"The \"", "+ segmentTextSplit2[1] segmentText = string.join(segmentTextSplit,\"\") if removeBulletList != []: segmentText", "### if previous text is empty, return nothing if prevText", "Wrap-up text goes here *|.\\n\" else: bulletFlag = 1 ##", "autoSend If set to 1, then the product will be", "automatically transmitted unless autoSend is 1. # This value is", "pass else: processedText = processedText + eachPara + '\\n\\n' #keep", "phrase. lines = regText.split('\\n') for x in xrange(len(lines)): if lines[x].find('The", "if x == 0: continue #headlines and text before the", "bullet codes textToUse = \"* \" + textToUse # format", "# Name of state, such as \"Georgia\" -- optional \"wfoCityState\":", "\" is now in effect\" + endTimePhrase + \". \"", "case # the site is not transmitting in mixed case", "overview = each['prevOverviewText'] self.__overviewText, dummy = self.cleanCapturedText( overview, 0) break", "This method finds an overview in the previous product #", "+ bullet + \"\\n\\n\" else: ### not a bullet, CTA", "adds the call to action statements. 
        #
        # This adds the call to action statements. This is only performed
        # if the segment is "NEW" or if the previous text has been discarded
        # due to a CAN/EXP/UPG segment
        #

        # remove items from forceCTAList if they exist in foundCTAs. Note
        # that the formats of these lists are different, thus this code
        # is more complicated
        for ent in foundCTAs:
            #only process CTAs that are vtec phen/sig based
            if ent.find('.') == 2:
                phensig = (ent[0:2], ent[3])   #phen.sig
                if phensig in forceCTAList:
                    del forceCTAList[forceCTAList.index(phensig)]

        hazardBodyPhrase = hazardBodyPhrase + '\n\n'
        ctas = []
        for (phen,sig) in forceCTAList:
            hazardPhenSig = phen + "." + sig
            cta = self.defaultCTA(hazardPhenSig)
            if cta not in ctas:
                ctas.append(cta)

        if len(ctas) > 0:
            hazardBodyPhrase = hazardBodyPhrase + \
              'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
            for c in ctas:
                hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
            hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'

        # Make sure there is only one CAP tag pairs
        hazardBodyPhrase = re.sub(r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', \
          "", hazardBodyPhrase)

        return hazardBodyPhrase
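    ## The phen/sig handling at the end of hazardBodyText turns a foundCTAs
    ## entry such as "DU.Y" into the tuple form used by forceCTAList:
    ##
    ##   ent = "DU.Y"
    ##   if ent.find('.') == 2:
    ##       phensig = (ent[0:2], ent[3])   # -> ("DU", "Y")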
    def finalOverviewText(self):
        #if didn't calculate any, use the default
        if len(self.__overviewText) == 0:

            if self._includeOverviewHeadline:
                overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
            else:
                overviewHeadline = ""

            if self._includeOverview:
                overviewBody = ".|*Overview (must edit)*|.\n\n"
            else:
                overviewBody = ""

            #assemble the lines
            overview = overviewHeadline + overviewBody
            return overview

        else:
            return self.__overviewText

    def overviewText(self, hazardList, pil):
        #
        # This method finds an overview in the previous product
        #
        overview = ""
        for each in hazardList:
            if (each.has_key('prevOverviewText') and
                each.has_key('pil') and
                each.has_key('endTime') and
                each.has_key('act')):
                if (each['pil'] == pil and
                    each['endTime'] > self._currentTime and
                    each['act'] not in ['CAN', 'EXP']):
                    overview = each['prevOverviewText']
                    self.__overviewText, dummy = self.cleanCapturedText(
                      overview, 0)
                    break
        return overview

    def useCaptureText(self, hazardList):
        #Based on the hazardlist, returns a tuple indicating:
        # (inc capture text, inc framing codes, skip CTAs, forceCTAList)
        #
        # For the values to be considered, the 'hdln' value must be
        # present in the list, or it needs to be a Statement
        #
        cans = ['CAN','UPG','EXP']
        acts = ['NEW','EXT','EXA','EXB','CON']
        foundACTS = 0
        foundCANS = 0
        foundSig = []
        for eh in hazardList:
            if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
                foundACTS = 1
            if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
                foundCANS = 1
            if eh['sig'] not in foundSig:
                foundSig.append(eh['sig'])

        includeFrameCodes = 0
        includeText = 1
        skipCTAs = 0
        forceCTAList = []

        # all actions are in CAN, UPG, EXP only (don't include text)
        if foundCANS and not foundACTS:
            if 'S' in foundSig and len(foundSig) == 1:   #only S
                includeFrameCodes = 1   #capture text, but frame it
            else:
                includeText = 0   #end of non statement

        # something in CANS and something in acts (frame it, include text)
        elif foundCANS and foundACTS:
            includeFrameCodes = 1
            skipCTAs = 1
            for eh in hazardList:
                if eh['act'] in acts and \
                  (eh['phen'], eh['sig']) not in forceCTAList and \
                  len(eh['hdln']):
                    forceCTAList.append((eh['phen'], eh['sig']))

        #everything in active entries, captured text is used, but still
        #need to handle the "NEW" entries.
        else:
            for eh in hazardList:
                if eh['act'] in ['NEW'] and len(eh['hdln']):
                    forceCTAList.append((eh['phen'], eh['sig']))

        return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
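    ## Rough decision table for useCaptureText above; the returned tuple is
    ## (includeText, includeFrameCodes, skipCTAs, forceCTAList):
    ##
    ##   all actions CAN/UPG/EXP, sigs == ['S']  -> (1, 1, 0, [])
    ##   all actions CAN/UPG/EXP, other sigs     -> (0, 0, 0, [])
    ##   CAN/UPG/EXP mixed with active actions   -> (1, 1, 1, [active phensigs])
    ##   only active actions                     -> (1, 0, 0, [NEW phensigs])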
    def cleanCapturedText(self, text, paragraphs, addFramingCodes = False,
      skipCTAs = False):
        #
        # This method takes a block of text, wraps it preserving blank lines,
        # then returns the part after 'paragraphs'.  So, if paragraphs is 0,
        # it returns the whole thing, if it's 2, it returns paragraphs
        # 2 -> end, etc.  Headlines are always removed.
        # Framing codes are added if specified.
        #
        paras = self.convertSingleParas(text)   #single paragraphs

        # keep track of any call to actions found
        foundCTAs = []

        # Process the paragraphs, keep only the interested ones
        paraCount = 0
        processedText = ''
        for eachPara in paras:
            if paraCount >= paragraphs:
                found = self.ctasFound(eachPara)   #get list of ctas found
                if skipCTAs and len(found):
                    pass
                else:
                    processedText = processedText + eachPara + '\n\n'
                    #keep track of remaining CTAs in processed text
                    for f in found:
                        if f not in foundCTAs:
                            foundCTAs.append(f)
            if eachPara.find('...') == 0:
                pass   #ignore headlines
            paraCount = paraCount + 1

        # Add framing codes
        if addFramingCodes:
            processedText = processedText.rstrip()
            processedText = "|*\n" + processedText + "*|\n"

        # Wrap
        processedText = self.endline(processedText,
          linelength=self._lineLength, breakStr=[" ", "-", "..."])

        return processedText, foundCTAs

    def ctasFound(self, text):
        #returns types of ctas found. The identifier is the pil (e.g., ZFP),
        #phen/sig (e.g., DU.Y), or GENERIC.

        #convert text to single paragraphs
        paragraphs = self.convertSingleParas(text)
        for x in xrange(len(paragraphs)):
            paragraphs[x] = string.replace(paragraphs[x],' ','')

        #make list of call to actions   (type, cta text)
        if self.__procCTA is None:
            self.__procCTA = []
            ctao = CallToActions.CallToActions()

            # procedure-based CTAs
            d = ctao.ctaDict()
            for k in d.keys():
                func = d[k]
                items = func()
                for it in items:
                    if type(it) == types.TupleType:
                        it = it[1]   #get second string which is the CTA
                    ctaParas = self.convertSingleParas(it)
                    for cta in ctaParas:
                        self.__procCTA.append((k,string.replace(cta,' ','')))

            # pil-based CTAs
            d = ctao.ctaPilDict()
            for k in d.keys():
                func = d[k]
                items = func()
                for it in items:
                    if type(it) == types.TupleType:
                        it = it[1]   #get second string which is the CTA
                    ctaParas = self.convertSingleParas(it)
                    for cta in ctaParas:
                        self.__procCTA.append((k,string.replace(cta,' ','')))

            # generic CTAs
            ctas = ctao.genericCTAs()
            for it in ctas:
                if type(it) == types.TupleType:
                    it = it[1]   #get second string which is the CTA
                ctaParas = self.convertSingleParas(it)
                for cta in ctaParas:
                    self.__procCTA.append(("GENERIC",
                      string.replace(cta,' ','')))

        #compare
        found = []
        for para in paragraphs:
            for (ctaType, cta) in self.__procCTA:
                ## Added following line to account for framing code issues in CTA
                cta = re.sub("\|\*.*\*\|","",cta)
                # We want this comparison to be case-insensitive just in case
                # the site is not transmitting in mixed case yet.
                if para.upper() == cta.upper() and ctaType not in found:
                    found.append(ctaType)
        return found

    def convertSingleParas(self, text):
        #returns a list of paragraphs based on the input text.
        lf = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
        ptext = lf.sub(r'\1 \2', text)
        ptext = string.replace(ptext, '\n\n', '\n')
        paragraphs = string.split(ptext, '\n')
        return paragraphs
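    ## convertSingleParas joins wrapped lines and splits on blank lines, e.g.:
    ##
    ##   text = "LINE ONE\nSTILL LINE ONE\n\nSECOND PARA"
    ##   self.convertSingleParas(text)
    ##   # -> ["LINE ONE STILL LINE ONE", "SECOND PARA"]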
Note #", "paragraphs, addFramingCodes = False, skipCTAs = False): # # This", "eachPara.find('...') == 0: pass #ignore headlines paraCount = paraCount +", "defined headline, skip phrase endTimePhrase = self.hazardTimePhrases(eachHazard, argDict) hazNameA =", "0: continue #no defined headline, skip phrase if self._bulletProd: continue", "by the double line feed term. # of the text", "are multiple sets of bullets. In this case # only", "list of ctas found if skipCTAs and len(found): pass else:", "self.__procCTA: ## Added following line to account for framing code", "eachHazard.has_key('prevText'): prevText = eachHazard['prevText'] if eachHazard['pil'] == 'MWS': startPara =", "the finished product. # Product is saved if autoWrite is", "= self._getVariables(argDict) if error is not None: return error #", "been issued. This \" + hazName + \\ \" is", "Replaced by 21309 code ## def _getBullets(self, newBulletList, argDict): ##", "\"No hazards to report\" # Determine time ranges error =", "# for eachHazard in canList: if len(eachHazard['hdln']) == 0: continue", "+ self._wfoCity nwsIntroUsed = 1 if phraseCount == 0: phraseCount", "Service') == 0: lines = lines[0:x] #eliminate following lines break", "fcst = self._preProcessProduct(fcst, argDict) # Generate the product for each", "CANS and something in acts (frame it, include text) elif", "hazardBodyPhrase + \"The \" + hazName + \\ \" will", "eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'): foundCANS", "export-restricted data whose # export/transfer/disclosure is restricted by U.S. law.", "default text is used. frameit can be \"Never\", in which", "\" # # This is for ext hazards # for", "time for the # expire time. # includeCities If 1,", "regText, multRecords) def substituteBulletedText(self, capText, defaultText, frameit=\"Never\"): #returns a properly", "self.convertSingleParas(it) for cta in ctaParas: self.__procCTA.append((\"GENERIC\", string.replace(cta,' ',''))) #compare found", "\" + self._areaName issuedByString = self.getIssuedByString() productName = self.checkTestMode(argDict, self._productName", "[] extList = [] conList = [] upgList = []", "removeBulletList = [] for eachHazard in sortedHazardList: if eachHazard['act'] in", "for cta in ctaParas: self.__procCTA.append((k,string.replace(cta,' ',''))) d = ctao.ctaPilDict() for", "0 length, then #the default text is used. frameit can", "product creation. # autoStore If set to 1, then the", "cities will be included in the area header # accurateCities", "fcst) # finish progress meter self.setProgressPercentage(100) self.progressMessage(0, 100, self._displayName +", "0: pass #ignore headlines paraCount = paraCount + 1 #", "# If segment doesn't pass the checks, put in framing", "there are multiple sets of bullets. In this case #", "File (\"Master Rights File.pdf\") for # further licensing information. ##", "self.makeAreaHeader( argDict, \"\", self._issueTime, expireTime, self._areaDictionary, None, cityDescriptor=self._cityDescriptor, areaList=segmentAreas, includeCities=self._includeCities,", "\". \" else: hazardBodyPhrase = hazardBodyPhrase + \"The \" +", "in bList: ## bullets = bullets + \"* \" +", "output string fcst = \"\" fcst = self._preProcessProduct(fcst, argDict) #", "skipCTAs) tester = segmentText[0] if tester == '*': startPara =", "for eachHazard in conList: if len(eachHazard['hdln']) == 0: continue #no", "for # creating/editing the combinations file. This must # be", "dgilling Perform case-insensitive # comparisons in foundCTAs. 
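
    # A minimal sketch (hypothetical, not part of this baseline file) of
    # how a site-level product would override Definition entries: subclass
    # this TextProduct, deep-copy Definition, and change only what differs.
    # The module name "GenericHazards" and the sample values below are
    # assumptions for illustration only.
    #
    #   import copy
    #   import GenericHazards
    #
    #   class TextProduct(GenericHazards.TextProduct):
    #       Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
    #       Definition["displayName"] = "Hazard_ABC"   # hypothetical name
    #       Definition["lineLength"] = 69              # hypothetical value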
    def __init__(self):
        TextRules.TextRules.__init__(self)
        SampleAnalysis.SampleAnalysis.__init__(self)
        self.__overviewText = ""
        self.__procCTA = None

    def generateForecast(self, argDict):
        # Generate Text Phrases for a list of edit areas

        # Get variables
        error = self._getVariables(argDict)
        if error is not None:
            return error

        # Get the segments
        hazardsC = argDict['hazards']
        segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
        if len(segmentList) == 0:
            return "No hazards to report"

        # Determine time ranges
        error = self._determineTimeRanges(argDict)
        if error is not None:
            return error

        # Initialize the output string
        fcst = ""
        fcst = self._preProcessProduct(fcst, argDict)

        # Generate the product for each segment in the segmentList
        fraction = 0
        fractionOne = 1.0/float(len(segmentList))
        percent = 50.0
        self.setProgressPercentage(50)
        for segmentAreas in segmentList:
            self.progressMessage(fraction, percent,
                                 "Making Product for Segment")
            fcst = self._preProcessArea(fcst, segmentAreas,
                                        self._expireTime, argDict)
            fcst = self._makeProduct(fcst, segmentAreas, argDict)
            fcst = self._postProcessArea(fcst, segmentAreas, argDict)
            fraction = fractionOne
        fcst = self._postProcessProduct(fcst, argDict)
        return fcst

    def _getVariables(self, argDict):
        # Make argDict accessible
        self.__argDict = argDict

        # Get Definition variables
        self._definition = argDict["forecastDef"]
        for key in self._definition.keys():
            exec "self._" + key + "= self._definition[key]"

        # Get VariableList
        varDict = argDict["varDict"]
        for key in varDict.keys():
            if type(key) is types.TupleType:
                label, variable = key
                exec "self._" + variable + "= varDict[key]"

        self._language = argDict["language"]
        return None

    def _determineTimeRanges(self, argDict):
        # Set up the time range for 0-240 hours
        self._timeRange = self.createTimeRange(0, 240)
        self._ddhhmmTime = self.getCurrentTime(
            argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
        self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
        self._currentTime = argDict['creationTime']
        self._expireTime = self._issueTime + self._purgeTime*3600
        self._timeLabel = self.getCurrentTime(
            argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
        return None
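
    # The exec-based binding in _getVariables above predates attribute
    # helpers; the following is a minimal, standalone sketch (not baseline
    # code) of the same effect using setattr. The method name and sample
    # keys are hypothetical, added only to illustrate the technique.
    def _demoDefinitionBinding(self):
        definition = {"lineLength": 66, "purgeTime": 8}   # sample entries
        for key in definition.keys():
            # equivalent to: exec "self._" + key + "= self._definition[key]"
            setattr(self, "_" + key, definition[key])
        return self._lineLength   # -> 66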
\" else: hazardBodyPhrase", "that this segment codes to determine capture or not, #", "If set to 1, then the product will be automatically", "# # The method hazardBodyText creates an attribution phrase #", "for b in bList: ## bullets = bullets + \"*", "+ \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) print \"segmentTextSplit is \", segmentTextSplit segmentTextSplit2", "hazardList, pil): # # This method finds an overview in", "see the Text Product User Guide # Section on \"Tkgnats:", "= eas + productName + \"\\n\" +\\ \"National Weather Service", "each list and build the phrases # nwsIntroUsed = 0", "impact = bullets[3] else: impact = None if len(regText) ==", "after bullets for x in xrange(1, len(bullets)): index = bullets[x].find('\\n\\n')", "= '', indentNextString = ' ', maxWidth=self._lineLength, breakStrings=[\" \", \"-\",", "None if len(bullets) >= 2: time = bullets[1] else: time", "sortedHazardList: if eachHazard['sig'] in ['S']and eachHazard['phen'] in ['CF', 'LS']: statementList.append(eachHazard)", "area = editArea, areaLabel=areaLabel, timeRange = self._timeRange) fcst = fcst", "eachHazard in statementList: hazardBodyPhrase = \"...|* Add statement headline *|...\\n\\n\"", "The afterText is text up to # the next bullet", "in items: if type(it) == types.TupleType: it = it[1] #get", "this comparison to be case-insensitive just in case # the", "(len(eh['hdln']) or eh['sig'] == 'S'): foundACTS = 1 if eh['act']", "\"The \" + hazName + \\ \" will expire \"", "of entries in # a segment, thus double events will", "captured/decoded. # (hazard, time, basis, impact, afterText, multipleRecords) if prevText", "something in acts (frame it, include text) elif foundCANS and", "Pine Street, Suite 340 # Mail Stop B8 # Omaha,", "\"= self._definition[key]\" # Get VariableList varDict = argDict[\"varDict\"] for key", "United States or abroad requires # an export license or", "fcst def allowedHazards(self): return [] # Added for DR 21194", "that's it print outText return outText # The _hazardTimePhrases method", "a tuple indicating: # (inc capture text, inc framing codes,", "EAS phrase to be include in product header \"includeOverviewHeadline\": 1,", "\"debug\": 0, # Name of map background for creating Combinations", "= \"\" #assemble the lines overview = overviewHeadline + overviewBody", "forceCTAList[forceCTAList.index(phensig)] hazardBodyPhrase = hazardBodyPhrase + '\\n\\n' ctas = [] for", "# Contractor Address: 6825 Pine Street, Suite 340 # Mail", "if paraCount >= paragraphs: found = self.ctasFound(eachPara) #get list of", "processedText, foundCTAs def decodeBulletedText(self, prevText): # returns the bullet paragraph", "= string.split(self._bulletDict().get(eachHazard['phen']),\",\") print \"newBullets = \", newBullets print \"segment text", "\"Official\", # \"Fcst\" or \"ISC\" # outputFile Defines the output", "_hazardTimePhrases method is passed a hazard key, and returns #", "bullet = self.indentText(bullet, indentFirstString = '', indentNextString = ' ',", "previous segment Text, then # we may have to add", "CallToActions.CallToActions() d = ctao.ctaDict() for k in d.keys(): func =", "a zone that must contain the hazard # in order", "has issued \" + \\ hazNameA + \". 
\" else:", "self.createTimeRange(0, 240) self._ddhhmmTime = self.getCurrentTime( argDict, \"%d%H%M\", shiftToLocal=0, stripLeading=0) self._issueTime", "50.0 self.setProgressPercentage(50) for segmentAreas in segmentList: self.progressMessage(fraction, percent, \"Making Product", "argDict) hazNameA = self.hazardName(eachHazard['hdln'], argDict, True) hazNameACap = self.sentence(hazNameA, addPeriod=False)", "\" + self._bulletOrder()[i] + \"...\") bulletFlag = 0 if bulletFlag:", "print \"correct bullet order should be: \", bulletOrder if bullet", "240) self._ddhhmmTime = self.getCurrentTime( argDict, \"%d%H%M\", shiftToLocal=0, stripLeading=0) self._issueTime =", "items: if type(it) == types.TupleType: it = it[1] #get second", "if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):", "*|\\n\\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\") bulletFlag = 0 # # Now if there", "usefulness for # any purpose. #------------------------------------------------------------------------- # Standard and Local", "Combinations # # purgeTime Maximum number of hours past issuance", "\" # # This is for con hazards # for", "in forceCTAList: del forceCTAList[forceCTAList.index(phensig)] hazardBodyPhrase = hazardBodyPhrase + '\\n\\n' ctas", "def convertSingleParas(self, text): #returns a list of paragraphs based on", "\"Western New York\" # wfoCityState City,state that the WFO is", "+ hazName + \". \" else: hazardBodyPhrase = hazardBodyPhrase +", "if saveBullet not in keepBulletList: keepBulletList.append(saveBullet) # Now determine which", "#the default text is used. frameit can be \"Never\", in", "self.__procCTA = [] ctao = CallToActions.CallToActions() d = ctao.ctaDict() for", "Product for Segment\") fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime, argDict) fcst", "overviewText(self, hazardList, pil): # # This method finds an overview", "sort the hazards for this segment by importance # sortedHazardList", "segment text # segmentText = '' # # Check that", "it return self.indentText(textToUse, indentFirstString = '', indentNextString = ' ',", "Output: #------------------------------------------------------------------------- import LogStream import TextRules import SampleAnalysis import time,", "for eachHazard in canList: if len(eachHazard['hdln']) == 0: continue #no", "bullets bullets.append(buf[x]) # find only the bulleted text, defined by", "first character is a * we found a bullet if", "# Language \"language\": \"english\", \"lineLength\": 66, #Maximum line length \"purgeTime\":", "in the segmentList fraction = 0 fractionOne = 1.0/float(len(segmentList)) percent", "\"\\n \",\"\\n\") fcst = string.replace(fcst, \"&&\", \"\\n&&\\n\") # Prevent empty", "for x in xrange(1, len(bullets)): index = bullets[x].find('\\n\\n') if index", "paragraphs[x] = string.replace(paragraphs[x],' ','') #make list of call to actions", "hazardList: if eachHazard['sig'] == each: if eachHazard not in sortedHazardList:", "preserving blank lines, # then returns the part after 'paragraphs'.", "skipCTAs = False): # # This method takes a block", "return processedText, foundCTAs def decodeBulletedText(self, prevText): # returns the bullet", "%Z %a %b %e %Y\", stripLeading=1) return None def _preProcessProduct(self,", "hazardBodyPhrase + hazNameA + \\ \" remains in effect\" +", "forceCTAList = [] # all actions are in CAN, UPG,", "bullet in staticBulletOrder: print \"correct bullet order should be: \",", "(default or cap) is wrapped in framing codes, or #DefaultOnly\"", "if len(segmentTextSplit2) == 2: 
segmentTextSplit[1] = \"*\" + segmentTextSplit2[1] else:", "pass the checks, put in framing codes else: hazardBodyPhrase =", "is no longer in effect. \" # # This is", "remainder of the text. However we only # want text", "for saveBullet in saveBullets: if saveBullet not in keepBulletList: keepBulletList.append(saveBullet)", "indentFirstString = '', indentNextString = ' ', maxWidth=self._lineLength, breakStrings=[\" \",", "The method hazardBodyText creates an attribution phrase # def hazardBodyText(self,", "code for product header, such as \"FOUS45\" # pil Product", "* we found a bullet if re.match(\"\\*\", b): ### remove", "= 0 else: startPara = 1 segmentText, foundCTAs = self.cleanCapturedText(prevText,", "# (inc capture text, inc framing codes, skip CTAs, forceCTAList)", "which is in effect\" + endTimePhrase + \". \" elif", "None) multRecords = 0 #indicator of multiple sets of bullets", "is\", without technical # support, and with no warranty, express", "need to keep. keepBulletList = [] for eachHazard in sortedHazardList:", "hours for expireTime \"includeCities\": 1 , # Cities included in", "effect. \" # # This is for con hazards #", "print \"reordered bullets are: \", bulletOrder for b in bulletOrder:", "after bullet text if len(bullets) > x+2: #more bullets are", "\" # # This is for the exp hazards #", "bullet text *|\\n\\n\" hazardBodyPhrase = hazardBodyPhrase + \"\\n\\n\" + bullets", "in foundSig and len(foundSig) == 1: #only S includeFrameCodes =", "zone combiner # database Source database for product. Can be", "# event that there are multiple sets of bullets. In", "range(start,end): if (re.search(\"\\* \" + self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE)", "contains export-restricted data whose # export/transfer/disclosure is restricted by U.S.", "the overview section is templated # bulletProd If 1, the", "+ \"= self._definition[key]\" # Get VariableList varDict = argDict[\"varDict\"] for", "re.DOTALL) bullet = removeLF.sub(r'\\1 \\2',b) ### indent code bullet =", "add bullets. # if incTextFlag and bulletProd: for eachHazard in", "\"-\", \"...\"]) return processedText, foundCTAs def decodeBulletedText(self, prevText): # returns", "City lat/lon dictionary to use \"cityDescriptor\":\"Including the cities of\", \"includeZoneNames\":1,", "product hazName = self.hazardName(eachHazard['hdln'], argDict, False) if eachHazard['endTime'] <= argDict['creationTime']:", "for ent in foundCTAs: #only process CTAs that are vtec", "hazNameACap + forPhrase + \\ \" has also been issued\"", "\"hazardBodyText info: keepBulletList: \",keepBulletList print \"hazardBodyText info: removeBulletList: \",removeBulletList #", "# U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product", "of bullets that we need to keep. keepBulletList = []", "Defines the awips product identifier # (e.g., DENCCFDEN) that is", "software was developed and / or modified by Raytheon Company,", "\"Including the cities of\" phrase used when including # cities", "CAN/EXP/UPG segment # # remove items from forceCTAList if they", "# This software was developed and / or modified by", "format it return self.indentText(textToUse, indentFirstString = '', indentNextString = '", "See the AWIPS II Master Rights File (\"Master Rights File.pdf\")", "in effect\" + endTimePhrase + \". 
\" # # This", "or frameit == \"DefaultOnly\": textToUse = \"|* \" + textToUse", "\"%d%H%M\", shiftToLocal=0, stripLeading=0) self._issueTime = AbsTime.AbsTime(argDict['creationTime']) self._currentTime = argDict['creationTime'] self._expireTime", "randerso Make bullet headers upper case # 07/15/2016 5749 randerso", "of product e.g. \"Zone Forecast Product\" # fullStationID Full station", "+ s fcst = fcst + \"Default overview section\\n\" return", "in DiscretePhrases. # def hazardTimePhrases(self, hazard, argDict, prefixSpace=True): timeWords =", "\" has been issued.\" else: hazardBodyPhrase += \"In addition, \"", "for all bullets bulletOrder = self._bulletOrder() staticBulletOrder = self._bulletOrder() for", "Product ID for transmitting to AWIPS WAN. \"periodCombining\" : 0,", "bulletOrder.remove(bullet) print \"reordered bullets are: \", bulletOrder for b in", "foundCTAs = [] for eachHazard in sortedHazardList: if eachHazard.has_key('prevText'): prevText", "EXPORT CONTROLLED TECHNICAL DATA # This software product contains export-restricted", "segment have # the same headlines editArea = segmentAreas[0] areaLabel", "list of call to actions (type, cta text) if self.__procCTA", "bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is None: print bullet +", "is templated # bulletProd If 1, the product will use", "+ hazNameA + \\ \" remains in effect\" + endTimePhrase", "== 'CAN': hazardBodyPhrase = hazardBodyPhrase + \\ \"\\n\\n|* Wrap-up text", "database for product. Can be \"Official\", # \"Fcst\" or \"ISC\"", "decodeBulletedText(self, prevText): # returns the bullet paragraph text or None,", "other authorization. # # Contractor Name: <NAME> # Contractor Address:", "hazards # for eachHazard in canList: if len(eachHazard['hdln']) == 0:", "in upgList: if len(eachHazard['hdln']) == 0: continue #no defined headline,", "Rights File.pdf\") for # further licensing information. ## # ----------------------------------------------------------------------------", "of each line # # defaultEditAreas defines edit areas, default", "that must contain the hazard # in order for it", "{ \"type\": \"smart\", \"displayName\": None, # Source database for product.", "for # storage. # awipsWANPil Defines the awips product identifier", "in effect. \" else: expTimeCurrent = argDict['creationTime'] timeWords = self.getTimingPhrase(eachHazard,", "# automatically stored unless autoStore is 1. This # value", "keepBulletList: keepBulletList.append(saveBullet) # Now determine which bullets we have to", "# This method takes a block of text, wraps it", "tasks that are identified and in progress: # # To", "developed and / or modified by Raytheon Company, # pursuant", "None #------------------------------------------------------------------------- # Associated Utilities Files e.g. Combinations file: #", "CTAs in processed text for f in found: if f", "U.S. EXPORT CONTROLLED TECHNICAL DATA # This software product contains", "in effect\" + endTimePhrase + \". \" lastHdln = hdln", "state, such as \"Georgia\" -- optional \"wfoCityState\": \"<wfoCityState>\", # Location", "None, None, None) multRecords = 0 #indicator of multiple sets", "Added following line to account for framing code issues in", "may have to add bullets. 
    #
    # The method hazardBodyText creates an attribution phrase
    #
    def hazardBodyText(self, hazardList, argDict):

        bulletProd = self._bulletProd
        hazardBodyPhrase = ''

        #
        # First, sort the hazards for this segment by importance
        #
        sortedHazardList = []
        for each in ['W', 'Y', 'A', 'O', 'S']:
            for eachHazard in hazardList:
                if eachHazard['sig'] == each:
                    if eachHazard not in sortedHazardList:
                        sortedHazardList.append(eachHazard)

        #
        # Next, break them into lists based on action
        #
        newList = []
        canList = []
        expList = []
        extList = []
        conList = []
        upgList = []
        statementList = []

        for eachHazard in sortedHazardList:
            if eachHazard['sig'] in ['S'] and eachHazard['phen'] in ['CF', 'LS']:
                statementList.append(eachHazard)
            elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
                newList.append(eachHazard)
            elif eachHazard['act'] in ['CAN']:
                canList.append(eachHazard)
            elif eachHazard['act'] in ['EXP']:
                expList.append(eachHazard)
            elif eachHazard['act'] in ['EXT']:
                extList.append(eachHazard)
            elif eachHazard['act'] in ['UPG']:
                upgList.append(eachHazard)
            else:
                conList.append(eachHazard)

        #
        # Now, go through each list and build the phrases
        #
        nwsIntroUsed = 0

        #
        # This is for the new hazards
        #
        phraseCount = 0
        lastHdln = None
        for eachHazard in newList:
            hdln = eachHazard['hdln']
            if len(eachHazard['hdln']) == 0:
                continue   # no defined headline, skip phrase
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazNameACap = self.sentence(hazNameA, addPeriod=False)
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)

            if hazName in ["Winter Weather Advisory", "Winter Storm Warning",
                           "Beach Hazards Statement"]:
                forPhrase = " for |* Enter hazard type *|"
            else:
                forPhrase = ""

            if nwsIntroUsed == 0:
                hazardBodyPhrase = "The National Weather Service in " + \
                                   self._wfoCity
                nwsIntroUsed = 1
            if phraseCount == 0:
                phraseCount = 1
                if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                    hazardBodyPhrase = hazardBodyPhrase + \
                        " has issued " + hazNameA + ". "
                else:
                    hazardBodyPhrase = hazardBodyPhrase + \
                        " has issued " + hazNameA + forPhrase + \
                        ", which is in effect" + endTimePhrase + ". "
            elif phraseCount == 1:
                phraseCount = 2
                if hdln != lastHdln:
                    if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            " has also been issued. "
                    else:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            forPhrase + " has also been issued" + \
                            endTimePhrase + ". "
                else:
                    if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            " has also been issued. "
                    else:
                        hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
                            forPhrase + " has also been issued. This " + \
                            hazName + " is in effect" + endTimePhrase + ". "
            else:
                if eachHazard['phen'] in ['HU', 'TR', 'TY']:
                    hazardBodyPhrase += "In addition, " + hazNameA + \
                        " has been issued. "
                else:
                    hazardBodyPhrase += "In addition, " + hazNameA + \
                        forPhrase + " has been issued. This " + hazName + \
                        " is in effect" + endTimePhrase + ". "
            lastHdln = hdln

        #
        # This is for the can hazards
        #
        for eachHazard in canList:
            if len(eachHazard['hdln']) == 0:
                continue   # no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            if nwsIntroUsed == 0:
                hazardBodyPhrase = "The National Weather Service in " + \
                                   self._wfoCity
                nwsIntroUsed = 1
                hazardBodyPhrase = hazardBodyPhrase + \
                    " has cancelled the " + hazName + ". "
            else:
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                    " has been cancelled. "

        #
        # This is for the exp hazards
        #
        for eachHazard in expList:
            if len(eachHazard['hdln']) == 0:
                continue   # no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            if eachHazard['endTime'] <= argDict['creationTime']:
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                    " is no longer in effect. "
            else:
                expTimeCurrent = argDict['creationTime']
                timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
                hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                    " will expire " + timeWords + ". "

        #
        # This is for ext hazards
        #
        for eachHazard in extList:
            if len(eachHazard['hdln']) == 0:
                continue   # no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazName = self.hazardName(eachHazard['hdln'], argDict, False)
            hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
                " is now in effect" + endTimePhrase + ". "

        #
        # This is for con hazards
        #
        for eachHazard in conList:
            if len(eachHazard['hdln']) == 0:
                continue   # no defined headline, skip phrase
            if self._bulletProd:
                continue   # No attribution for this case if it is a bullet product
            endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
            hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
            hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
                " remains in effect" + endTimePhrase + ". "

        #
        # This is for upgrade hazards
        #
        for eachHazard in upgList:
            if len(eachHazard['hdln']) == 0:
                continue   # no defined headline, skip phrase
            # no wording is generated here; the replacing hazard is
            # attributed in the new-hazard section above

        #
        # This is for statement hazards
        #
        for eachHazard in statementList:
            hazardBodyPhrase = "...|* Add statement headline *|...\n\n"

        #
        # This adds segment text
        #
        segmentText = ''

        #
        # Determine whether the previous text is included and whether to
        # frame captured text or not
        #
        incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
          self.useCaptureText(sortedHazardList)

        #
        # Check that the previous text exists
        #
        foundCTAs = []
        for eachHazard in sortedHazardList:
            if eachHazard.has_key('prevText'):
                prevText = eachHazard['prevText']
                if eachHazard['pil'] == 'MWS':
                    startPara = 0
                else:
                    startPara = 1
                segmentText, foundCTAs = self.cleanCapturedText(prevText,
                  startPara, addFramingCodes = False,
                  skipCTAs = skipCTAs)
                tester = segmentText[0]
                if tester == '*':
                    startPara = 1
                else:
                    startPara = 2
                segmentText, foundCTAs = self.cleanCapturedText(prevText,
                  startPara, addFramingCodes = False,
                  skipCTAs = skipCTAs)

        # Check that the segment text isn't very short or blank
        if len(segmentText) < 6:
            incTextFlag = 0

        # DR 21309 code addition from Middendorf (BYZ)
        #
        # Now if there is a new hazard and previous segment Text, then
        # we may have to add bullets.
        #
        if incTextFlag and bulletProd:
            for eachHazard in sortedHazardList:
                if not eachHazard.has_key('prevText'):
                    newBullets = string.split(
                        self._bulletDict().get(eachHazard['phen']), ",")
                    print "newBullets = ", newBullets
                    print "segment text is: ", segmentText
                    for bullet in newBullets:
                        if re.search("\* " + bullet + "\.\.\.", segmentText,
                                     flags=re.IGNORECASE) is None:
                            print bullet + " not in segmentText"
                            start = self._bulletOrder().index(bullet) + 1
                            end = len(self._bulletOrder())
                            bulletFlag = 1
                            for i in range(start, end):
                                if (re.search(
                                        "\* " + self._bulletOrder()[i] + "\.\.\.",
                                        segmentText,
                                        flags=re.IGNORECASE) is not None) and bulletFlag:
                                    print "* " + self._bulletOrder()[i] + "... found!"
                                    segmentTextSplit = re.split(
                                        "\* " + self._bulletOrder()[i] + "\.\.\.",
                                        segmentText, flags=re.IGNORECASE)
                                    segmentText = string.join(
                                        segmentTextSplit,
                                        "* " + bullet.upper() +
                                        "...|* Enter bullet text *|\n\n* " +
                                        self._bulletOrder()[i] + "...")
                                    bulletFlag = 0
                            if bulletFlag:
                                print "appending to bottom list of bullets!"
                                segmentTextSplit = re.split(
                                    "PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.",
                                    segmentText, flags=re.IGNORECASE)
                                segmentText = "\n" + string.join(
                                    segmentTextSplit,
                                    "* " + bullet.upper() +
                                    "...|* Enter bullet text *|\n\n" +
                                    "PRECAUTIONARY/PREPAREDNESS ACTIONS...")
                                bulletFlag = 0

        #
        # Now if there is a can/exp hazard and previous segment Text, then
        # we may have to remove bullets.
        #
        if incTextFlag and bulletProd:
            # First make list of bullets that we need to keep.
            keepBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] not in ["CAN", "EXP"]:
                    saveBullets = string.split(
                        self._bulletDict().get(eachHazard['phen']), ",")
                    for saveBullet in saveBullets:
                        if saveBullet not in keepBulletList:
                            keepBulletList.append(saveBullet)
            # Now determine which bullets we have to remove.
            removeBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] in ["CAN", "EXP"]:
                    canBullets = string.split(
                        self._bulletDict().get(eachHazard['phen']), ",")
                    for canBullet in canBullets:
                        if canBullet not in keepBulletList and \
                           canBullet not in removeBulletList:
                            removeBulletList.append(canBullet)
            print "hazardBodyText info: keepBulletList: ", keepBulletList
            print "hazardBodyText info: removeBulletList: ", removeBulletList
            # Finally remove the bullets no longer needed.
            for bullet in removeBulletList:
                if re.search("\* " + bullet + "\.\.\.", segmentText,
                             flags=re.IGNORECASE) is not None:
                    segmentTextSplit = re.split("\* " + bullet + "\.\.\.",
                                                segmentText,
                                                flags=re.IGNORECASE)
                    print "segmentTextSplit is ", segmentTextSplit
                    segmentTextSplit2 = string.split(segmentTextSplit[1], "*", 1)
                    if len(segmentTextSplit2) == 2:
                        segmentTextSplit[1] = "*" + segmentTextSplit2[1]
                    else:
                        segmentTextSplit2 = re.split(
                            "PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.",
                            segmentTextSplit[1], 1, flags=re.IGNORECASE)
                        if len(segmentTextSplit2) == 2:
                            segmentTextSplit[1] = \
                                "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + \
                                segmentTextSplit2[1]
                    segmentText = string.join(segmentTextSplit, "")
            if removeBulletList != []:
                segmentText = "|*\n" + segmentText + "*|"
            else:
                segmentText = segmentText

        #
        # If segment passes the above checks, add the text
        #
        print "hazardBodyText info: incTextFlag: ", incTextFlag
        if incTextFlag:
            print "hazardBodyText info: segmentText: ", segmentText
            hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
                               segmentText + '\n'
        elif bulletProd:
            bulletFlag = 0
            if eachHazard['act'] == 'CAN':
                hazardBodyPhrase = hazardBodyPhrase + \
                    "\n\n|* Wrap-up text goes here *|.\n"
            elif eachHazard['act'] == 'EXP':
                hazardBodyPhrase = hazardBodyPhrase + \
                    "\n\n|* Wrap-up text goes here *|.\n"
            else:
                bulletFlag = 1
            ## print "bulletFlag is: ", bulletFlag
            if bulletFlag:
                newBulletList = []
                bullets = ""
                for eachHazard in sortedHazardList:
                    ## get the default bullets for all hazards from the
                    ## bullet dictionary
                    newBullets = string.split(
                        self._bulletDict().get(eachHazard['phen']), ",")
                    for newBullet in newBullets:
                        if newBullet not in newBulletList:
                            newBulletList.append(newBullet)
                print "my bullets are: ", newBulletList
                ### Determine the correct order for all bullets
                bulletOrder = self._bulletOrder()
                staticBulletOrder = self._bulletOrder()
                for bullet in staticBulletOrder:
                    print "correct bullet order should be: ", bulletOrder
                    if bullet not in newBulletList:
                        bulletOrder.remove(bullet)
                print "reordered bullets are: ", bulletOrder
                for b in bulletOrder:
                    bullets = bullets + "* " + b.upper() + \
                        "...|* Enter bullet text *|\n\n"
                hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets
        # If segment doesn't pass the checks, put in framing codes
        else:
            hazardBodyPhrase = hazardBodyPhrase + \
                "\n\n|* Statement text goes here *|.\n\n"
        # End code for DR 21310

        #
        # This adds the call to action statements. This is only performed
        # if the segment is 'NEW' or if the previous text has been discarded
        # due to a CAN/EXP/UPG segment
        #

        # remove items from forceCTAList if they exist in foundCTAs. Note
        # that the formats of these lists are different, thus this code
        # is more complicated
        for ent in foundCTAs:
            # only process CTAs that are vtec phen/sig based
            if len(ent) == 4 and ent[2] == '.':
                phensig = (ent[0:2], ent[3])   # phen.sig
                if phensig in forceCTAList:
                    del forceCTAList[forceCTAList.index(phensig)]

        hazardBodyPhrase = hazardBodyPhrase + '\n\n'
        ctas = []
        for (phen, sig) in forceCTAList:
            hazardPhenSig = phen + "." + sig
            cta = self.defaultCTA(hazardPhenSig)
            if cta not in ctas:
                ctas.append(cta)

        if len(ctas) > 0:
            hazardBodyPhrase = hazardBodyPhrase + \
                'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
            for c in ctas:
                hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
            hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'

        # Make sure there is only one CAP tag pairs
        hazardBodyPhrase = re.sub(
            r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', "",
            hazardBodyPhrase)

        return hazardBodyPhrase
    def finalOverviewText(self):
        # if didn't calculate any, use the default
        if len(self.__overviewText) == 0:

            if self._includeOverviewHeadline:
                overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
            else:
                overviewHeadline = ""

            if self._includeOverview:
                overviewBody = ".|*Overview (must edit)*|.\n\n"
            else:
                overviewBody = ""

            # assemble the lines
            overview = overviewHeadline + overviewBody
            return overview

        else:
            return self.__overviewText

    def overviewText(self, hazardList, pil):
        #
        # This method finds an overview in the previous product
        #
        overview = ""
        for each in hazardList:
            if (each.has_key('prevOverviewText') and
                each.has_key('pil') and
                each.has_key('endTime') and
                each.has_key('act')):
                if (each['pil'] == pil and
                    each['endTime'] > self._currentTime and
                    each['act'] not in ['CAN', 'EXP']):
                    overview = each['prevOverviewText']
                    self.__overviewText, dummy = self.cleanCapturedText(
                        overview, 0)
                    break

    def useCaptureText(self, hazardList):
        # Based on the hazardlist, returns a tuple indicating:
        # (inc capture text, inc framing codes, skip CTAs, forceCTAList)
        #
        # For the values to be considered, the 'hdln' value must be
        # present in the list, or it needs to be a Statement (sig="S")
        cans = ['CAN', 'UPG', 'EXP']
        acts = ['NEW', 'EXT', 'EXA', 'EXB', 'CON']
        foundACTS = 0
        foundCANS = 0
        foundSig = []
        for eh in hazardList:
            if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
                foundACTS = 1
            if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
                foundCANS = 1
            if eh['sig'] not in foundSig:
                foundSig.append(eh['sig'])

        includeFrameCodes = 0
        includeText = 1
        skipCTAs = 0
        forceCTAList = []

        # all actions are in CAN, UPG, EXP only (don't include text)
        if foundCANS and not foundACTS:
            if 'S' in foundSig and len(foundSig) == 1:   # only S
                includeFrameCodes = 1   # capture text, but frame it
            else:
                includeText = 0   # end of non statement

        # something in CANS and something in acts (frame it, include text)
        elif foundCANS and foundACTS:
            includeFrameCodes = 1
            skipCTAs = 1
            for eh in hazardList:
                if eh['act'] in acts and \
                   (eh['phen'], eh['sig']) not in forceCTAList and \
                   len(eh['hdln']):
                    forceCTAList.append((eh['phen'], eh['sig']))

        # everything in active entries, captured text is used, but still
        # need to force CTAs for the new events
        else:
            for eh in hazardList:
                if eh['act'] in ['NEW'] and len(eh['hdln']):
                    forceCTAList.append((eh['phen'], eh['sig']))

        return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
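
    # A minimal sketch (not baseline code) of a useCaptureText decision:
    # a lone CAN event that carries a headline discards the captured text
    # (includeText 0), with no framing codes and no forced CTAs. The
    # method name and the toy record are hypothetical.
    def _demoUseCaptureText(self):
        hazardList = [{'act': 'CAN', 'phen': 'WS', 'sig': 'W',
                       'hdln': 'Winter Storm Warning'}]
        return self.useCaptureText(hazardList)   # -> (0, 0, 0, [])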
    def cleanCapturedText(self, text, paragraphs, addFramingCodes = False,
      skipCTAs = False):
        #
        # This method takes a block of text, wraps it preserving blank lines,
        # then returns the part after 'paragraphs'. So, if paragraphs is 0, it
        # returns the whole thing, if it's 2, it returns paragraphs 2 -> end,
        # etc. Headlines are always removed.
        # Framing codes are added if specified.
        #
        paras = self.convertSingleParas(text)   # single paragraphs

        # keep track of any call to actions found
        foundCTAs = []

        # Process the paragraphs, keep only the interested ones
        paraCount = 0
        processedText = ''
        for eachPara in paras:
            if paraCount >= paragraphs:
                found = self.ctasFound(eachPara)   # get list of ctas found
                if skipCTAs and len(found):
                    pass
                else:
                    processedText = processedText + eachPara + '\n\n'
                    # keep track of remaining CTAs in processed text
                    for f in found:
                        if f not in foundCTAs:
                            foundCTAs.append(f)
            if eachPara.find('...') == 0:
                pass   # ignore headlines
            paraCount = paraCount + 1

        # Add framing codes
        if addFramingCodes:
            processedText = processedText.rstrip()
            processedText = "|*\n" + processedText + "*|\n"

        # Wrap
        processedText = self.endline(processedText,
                                     linelength=self._lineLength,
                                     breakStr=[" ", "-", "..."])
        return processedText, foundCTAs

    def convertSingleParas(self, text):
        # returns a list of paragraphs based on the input text.
        lf = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
        ptext = lf.sub(r'\1 \2', text)
        ptext = ptext.replace('\n\n', '\n')
        paragraphs = ptext.split('\n')
        return paragraphs

    def ctasFound(self, text):
        # returns types of ctas found. The identifier is the pil (e.g., ZFP),
        # phen/sig (e.g., DU.Y), or GENERIC. Uses the CallToAction definitions.

        # convert text to single paragraphs
        paragraphs = self.convertSingleParas(text)
        for x in xrange(len(paragraphs)):
            paragraphs[x] = string.replace(paragraphs[x], ' ', '')

        # make list of call to actions (type, cta text)
        if self.__procCTA is None:
            self.__procCTA = []
            ctao = CallToActions.CallToActions()
            d = ctao.ctaDict()
            for k in d.keys():
                func = d[k]
                items = func()
                for it in items:
                    if type(it) == types.TupleType:
                        it = it[1]   # get second string which is the CTA
                    ctaParas = self.convertSingleParas(it)
                    for cta in ctaParas:
                        self.__procCTA.append((k, string.replace(cta, ' ', '')))
            d = ctao.ctaPilDict()
            for k in d.keys():
                func = d[k]
                items = func()
                for it in items:
                    if type(it) == types.TupleType:
                        it = it[1]   # get second string which is the CTA
                    ctaParas = self.convertSingleParas(it)
                    for cta in ctaParas:
                        self.__procCTA.append((k, string.replace(cta, ' ', '')))
            ctas = ctao.genericCTAs()
            for it in ctas:
                if type(it) == types.TupleType:
                    it = it[1]   # get second string which is the CTA
                ctaParas = self.convertSingleParas(it)
                for cta in ctaParas:
                    self.__procCTA.append(("GENERIC",
                                           string.replace(cta, ' ', '')))

        # compare
        found = []
        for para in paragraphs:
            for (ctaType, cta) in self.__procCTA:
                ## Added following line to account for framing code issues in CTA
                cta = re.sub("\|\*.*\*\|", "", cta)
                # We want this comparison to be case-insensitive just in case
                # the site is not transmitting in mixed case yet.
                if para.upper() == cta.upper() and ctaType not in found:
                    found.append(ctaType)
        return found
#", "# Counties # Marine_Zones_BOU \"mapNameForCombinations\": \"Zones_<site>\", ## Edit Areas: Create", "argDict, prefixSpace=True): timeWords = self.getTimingPhrase(hazard, argDict['creationTime']) if prefixSpace and len(timeWords):", "import AbsTime class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis, CallToActions.CallToActions): Definition = { \"type\":", "initialize the bullet output ## bullets = \"\" ## ##", "eh['sig'])) #everything in active entries, captured text is used, but", "New York\" # wfoCityState City,state that the WFO is located", "= self._accurateCities) fcst = fcst + areaHeader return fcst def", "from Middendorf (BYZ) # # Now if there is a", "isn't very short or blank # if len(segmentText) < 6:", "the AWIPS WAN. The product is not # automatically transmitted", "support, and with no warranty, express or implied, as to", "bullets. # if incTextFlag and bulletProd: # First make list", "+ \"The \" + hazName + \\ \" is now", "# # # Check that the previous text exists #", "= d[k] items = func() for it in items: if", "re.split(\"\\* \" + bullet + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) print \"segmentTextSplit", "Defines the awips product identifier # (e.g., KBOUCCFDEN) that is", "beginning of any next NWS phrase. lines = regText.split('\\n') for", "# format it return self.indentText(textToUse, indentFirstString = '', indentNextString =", "if canBullet not in keepBulletList and canBullet not in removeBulletList:", "remove the bullets no longer needed. for bullet in removeBulletList:", "that there are multiple sets of bullets. In this case", "in canList: if len(eachHazard['hdln']) == 0: continue #no defined headline,", "> x+2: #more bullets are present multRecords = 1 bullets", "= ptext.split('\\n') return paragraphs def ctasFound(self, text): #returns types of", "\"Zone Forecast Product\" # fullStationID Full station identifier, 4 letter,", "in the segment since all areas in the segment have", "of WFO - city,state \"textdbPil\": \"<textdbPil>\", # Product ID for", "Name of map background for creating Combinations # Can be:", "!= 0: eas = self._easPhrase + '\\n' else: eas =", "'TR', 'TY']: hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \\ \"", "def hazardTimePhrases(self, hazard, argDict, prefixSpace=True): timeWords = self.getTimingPhrase(hazard, argDict['creationTime']) if", "self._bulletOrder()[i] + \"\\.\\.\\.\", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag:", "+ \\ \"\\n\\n|* Wrap-up text goes here *|.\\n\" elif eachHazard['act']", "+ \\ hazNameA + \". \" else: hazardBodyPhrase += \"", "from forceCTAList if they exist in foundCTAs. 
#-------------------------------------------------------------------------
# CONFIGURATION SECTION
#
# Required Configuration Items:
#
#  displayName      If not None, defines how product appears in GFE GUI
#
#  You must set the following:
#
#  productName      Product name, such as "Zone Forecast Product"
#  fullStationID    Full station identifier, 4 letter
#  wmoID            WMO ID for product header, such as "FOUS45"
#  pil              Product pil, such as "SFTBOS"
#  areaName (opt.)  Area name for product header, such as
#                   "Western New York"
#  wfoCityState     City,state that the WFO is located in, such as
#                   "Buffalo NY"
#
# Optional Configuration Items
#
#  database         Source database for product. Can be "Official",
#                   "Fcst" or "ISC"
#  outputFile       Defines the output location of the finished product.
#                   Product is saved if autoWrite is 1.
#  debug            If on, debug_print statements will appear.
#  textdbPil        Defines the awips product identifier (e.g., DENCCFDEN)
#                   that is used to store the product in the AWIPS text
#                   database. The product is not automatically stored
#                   unless autoStore is 1. This value is also used for the
#                   default GUI entry for storage.
#  awipsWANPil      Defines the awips product identifier (e.g., KBOUCCFDEN)
#                   that is used to transmit the product to the AWIPS WAN.
#                   The product is not automatically transmitted unless
#                   autoSend is 1.
#  autoSend         If set to 1, then the product will be automatically
#                   sent on the AWIPS WAN to the "autoSendAddress" with
#                   the awipsWANPil after product creation.
#  autoStore        If set to 1, then the product will be automatically
#                   stored into the text database using the "textdbPil"
#                   after product creation.
#  autoWrite        If set to 1, then the product will be automatically
#                   written to the "output" named disk file after product
#                   creation.
#
#  lineLength       max length of each line
#
#  defaultEditAreas defines edit areas, default is Combinations
#  mapNameForCombinations  Name of map background that is used for
#                   creating/editing the combinations file. This must
#                   be defined or the GFE zone combiner ...
#                   Can be:
#                       Zones_<site>
#                       Counties
#                       Marine_Zones_BOU
#
## Edit Areas: Create a Combinations file with edit area combinations.
## Can be:
##    EditAreas_PublicZones_BOU
##    EditAreas_FireWx_BOU
#
#  periodCombining  If 1, combine periods, if possible
#  purgeTime        Maximum number of hours past issuance time for the
#                   expire time.
#  includeCities    If 1, cities will be included in the area header
#  accurateCities   If 1, cities are determined from grids
#  citiesPhrase     "Including the cities of" phrase used when including
#                   cities
#  includeZoneNames If 1, zone names will be included in the area header
#  easPhrase        Optional EAS phrase to be included in product header
#  includeOverviewHeadline  If 1, the overview header is templated
#  includeOverview  If 1, the overview section is included
#  bulletProd       If 1, the product will use a bullet format
#
#  hazardSamplingThreshold  Defines the percentage coverage or number of
#                   grid points in a zone that must contain the hazard
#                   in order for it to be considered. Tuple (percent, points)
#
#  cityLocation     City lat/lon dictionary to use
#  areaDictionary   Descriptive information about zones ("AreaDictionary")
#  language         Language for the product ("english")
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
#   Combinations file
#-------------------------------------------------------------------------
# Component Products:
#   Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
# To look up tasks and their status, see the Text Product User Guide
# Section ...
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
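# Aside (hypothetical helper, not part of the source): the
# hazardSamplingThreshold tuple documented above reads as
# "(percent coverage, number of grid points)"; a zone is considered to
# contain the hazard when either bound that is not None is met. A minimal
# sketch of that interpretation:

def _meetsSamplingThreshold(pctCoverage, numPoints, threshold=(10, None)):
    # threshold = (percent, points); a None entry disables that test
    percent, points = threshold
    if percent is not None and pctCoverage >= percent:
        return 1
    if points is not None and numPoints >= points:
        return 1
    return 0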
import LogStream
import TextRules
import SampleAnalysis
import time, string, types, copy, re
import CallToActions
import AbsTime

class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,
                  CallToActions.CallToActions):
    Definition = {
        "type": "smart",
        "displayName": None,   # if not None, defines how product appears in GFE GUI
        "database": "Official",   # Source database. "Official", "Fcst" or "ISC"
        "outputFile": "{prddir}/TEXT/genHaz.txt",  # output location of finished product
        "debug": 0,
        "mapNameForCombinations": "Zones_<site>",  # map background for creating Combinations
        "productName": "Generic Hazard Product",   # product name
        "fullStationID": "<fullStationID>",   # full station identifier (4letter)
        "wmoID": "<wmoID>",                   # WMO ID
        "pil": "<pil>",                       # Product pil
        "areaName": "",   # optional area name for product header
        "state": "",      # Name of state, such as "Georgia" -- optional
        "wfoCityState": "<wfoCityState>",     # Location of WFO - city,state
        "textdbPil": "<textdbPil>",       # Product ID for storing to AWIPS text database.
        "awipsWANPil": "<awipsWANPil>",   # Product ID for transmitting to AWIPS WAN.
        "periodCombining": 0,      # If 1, combine periods, if possible
        "autoSend": 0,             # set to 1 to automatically transmit product
        "autoSendAddress": "000",  # transmission address
        "autoStore": 0,            # set to 1 to automatically store product
        # Area Dictionary -- Descriptive information about zones
        "areaDictionary": "AreaDictionary",
        # Language
        "language": "english",
        "lineLength": 66,   # Maximum line length
        "purgeTime": 8,     # Maximum hours for expireTime
        "includeCities": 1,   # Cities included in area header
        "accurateCities": 0,  # If 1, cities are determined from grids
        "cityLocation": "CityLocation",   # City lat/lon dictionary to use
        "cityDescriptor": "Including the cities of",
        "includeZoneNames": 1,   # Zone names will be included in the area header
        "easPhrase": "",         # Optional EAS phrase to be included in product header
        "includeOverviewHeadline": 1,   # include overview header
        "includeOverview": 1,           # include overview section
        "bulletProd": 0,                # do not default to bullets
        "hazardSamplingThreshold": (10, None),   # (%cov, #points)
        "callToAction": 1,
    }

    def __init__(self):
        TextRules.TextRules.__init__(self)
        SampleAnalysis.SampleAnalysis.__init__(self)
        self.__overviewText = ""
        self.__procCTA = None

    def generateForecast(self, argDict):
        # Generate Text Phrases for a list of edit areas

        # Get variables
        error = self._getVariables(argDict)
        if error is not None:
            return error

        # Set up the time ranges
        error = self._determineTimeRanges(argDict)
        if error is not None:
            return error

        # Get the segments
        hazardsC = argDict['hazards']
        segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
        if len(segmentList) == 0:
            return "No hazards to report"

        # Initialize the output string
        fcst = ""
        fcst = self._preProcessProduct(fcst, argDict)

        # Generate the product for each segment in the segmentList
        fraction = 0
        fractionOne = 1.0 / float(len(segmentList))
        percent = 50.0
        self.setProgressPercentage(50)
        for segmentAreas in segmentList:
            self.progressMessage(fraction, percent,
                                 "Making Product for Segment")
            fcst = self._preProcessArea(fcst, segmentAreas,
                                        self._expireTime, argDict)
            fcst = self._makeProduct(fcst, segmentAreas, argDict)
            fcst = self._postProcessArea(fcst, segmentAreas, argDict)
            fraction = fractionOne
        fcst = self._postProcessProduct(fcst, argDict)
        return fcst

    def _getVariables(self, argDict):
        # Make argDict accessible
        self.__argDict = argDict

        # Get Definition variables
        self._definition = argDict["forecastDef"]
        for key in self._definition.keys():
            exec "self._" + key + "= self._definition[key]"

        varDict = argDict["varDict"]
        for key in varDict.keys():
            if type(key) is types.TupleType:
                label, variable = key
                exec "self._" + variable + "= varDict[key]"
        self._language = argDict["language"]

        # Set up information for Hazards product
        self._hazards = argDict['hazards']
        self._combinations = argDict["combinations"]
        return None

    def _determineTimeRanges(self, argDict):
        # Set up the time range
        self._ddhhmmTime = self.getCurrentTime(argDict, "%d%H%M",
                                               shiftToLocal=0, stripLeading=0)
        self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
        self._currentTime = argDict['creationTime']
        self._expireTime = self._issueTime + self._purgeTime * 3600
        self._timeLabel = self.getCurrentTime(argDict,
                                              "%l%M %p %Z %a %b %e %Y",
                                              stripLeading=1)
        return None
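    # Aside (illustrative, not from the source): the exec-based unpacking in
    # _getVariables above is a Python-2 idiom; an exec-free equivalent is
    #
    #     for key in self._definition.keys():
    #         setattr(self, "_" + key, self._definition[key])
    #
    # which assigns the same self._<key> attributes.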
    def _preProcessProduct(self, fcst, argDict):
        # Product header
        if self._areaName != "":
            self._areaName = " for " + self._areaName
        issuedByString = self.getIssuedByString()
        productName = self.checkTestMode(argDict,
                                         self._productName + self._areaName)

        if len(self._easPhrase) != 0:
            eas = self._easPhrase + '\n'
        else:
            eas = ''

        s = self._wmoID + " " + self._fullStationID + " " + \
            self._ddhhmmTime + "\n" + self._pil + "\n\n"
        fcst = fcst + s.upper()

        s = eas + productName + "\n" + \
            "National Weather Service " + self._wfoCityState + "\n" + \
            issuedByString + self._timeLabel + "\n\n"
        fcst = fcst + s
        fcst = fcst + "Default overview section\n"
        return fcst

    def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
        # This is the header for an edit area combination
        areaHeader = self.makeAreaHeader(
            argDict, "", self._issueTime, expireTime,
            self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
            areaList=segmentAreas, includeCities=self._includeCities,
            includeZoneNames=self._includeZoneNames,
            accurateCities=self._accurateCities)
        fcst = fcst + areaHeader
        return fcst

    def _makeProduct(self, fcst, segmentAreas, argDict):
        # Generate Narrative Forecast for Edit Area
        # get the hazards text
        # We only need to get headlines for the first edit area
        # in the segment since all areas in the segment have
        # the same headlines
        editArea = segmentAreas[0]
        areaLabel = editArea
        headlines = self.generateProduct("Hazards", argDict, area=editArea,
                                         areaLabel=areaLabel,
                                         timeRange=self._timeRange)
        fcst = fcst + headlines
        return fcst

    def _postProcessArea(self, fcst, segmentAreas, argDict):
        return fcst + "\n\n$$\n\n"

    def _postProcessProduct(self, fcst, argDict):
        # If an overview exists for this product, insert it
        overview = self.finalOverviewText()
        overviewSearch = re.compile(r'Default overview section', re.DOTALL)
        fcst = overviewSearch.sub(overview, fcst)

        # Added to place line feeds in the CAP tags to keep separate
        # from CTAs
        fcst = string.replace(fcst, \
                              r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
                              r"\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.")
        # Remove empty Call to Action Tags
        fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
                      "", fcst)
        # Collapse runs of blank lines
        fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
        fcst = fixMultiLF.sub(r'\1', fcst)

        # finish progress meter
        self.setProgressPercentage(100)
        self.progressMessage(0, 100, "Complete")
        return fcst

    def allowedHazards(self):
        return []

    # Added for DR 21194
    def _bulletDict(self):
        return []

    # Added for DR 21309
    def _bulletOrder(self):
        return []

    ## Replaced by 21309 code
    ## def _getBullets(self, newBulletList, argDict):
    ##     ### get the bullet dictionary and split the bullets
    ##     bDict = self._bulletDict()
    ##     bLine = bDict.get(eachHazard['phen'])
    ##     bList = newBulletList.split(",")
    ##     ### initialize the bullet output
    ##     bullets = ""
    ##     ### loop through the bullets
    ##     for b in bList:
    ##         bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
    ##     bullets = bullets + "\n"
    ##     return bullets

    def _indentBulletText(self, prevText):
        print prevText
        ### if previous text is empty, return nothing
        if prevText is None:
            return prevText
        ### split the text
        bullets = []
        bullets = string.split(prevText, '\n\n')
        if len(bullets) <= 1:
            return prevText
        ### process the text
        outText = ""
        for b in bullets:
            ### if first character is a * we found a bullet
            if re.match("\*", b):
                ### remove line feeds
                removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
                bullet = removeLF.sub(r'\1 \2', b)
                ### indent code
                bullet = self.indentText(bullet, indentFirstString='',
                                         indentNextString='  ',
                                         maxWidth=self._lineLength,
                                         breakStrings=[" ", "..."])
                ### the "-" in the breakStrings line above is causing issues
                ### with offices that use "-20 degrees" in the text.
                outText = outText + bullet + "\n\n"
            else:
                ### not a bullet, keep the paragraph as is
                outText = outText + b + "\n\n"
        ### that's it
        print outText
        return outText

    # The _hazardTimePhrases method is passed a hazard key, and returns
    # time phrase wording consistent with that generated by the headline
    # algorithms in DiscretePhrases.
    #
    def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):
        timeWords = self.getTimingPhrase(hazard, argDict['creationTime'])
        if prefixSpace and len(timeWords):
            timeWords = " " + timeWords   #add a leading space
        return timeWords
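    # Aside (illustrative, not from the source): the effect of the CTA tag
    # cleanup in _postProcessProduct on a toy string. An empty
    # PRECAUTIONARY/PREPAREDNESS ACTIONS block -- a tag followed only by its
    # closing "&&" -- is deleted outright:
    #
    #     import re
    #     fcst = "TEXT\nPRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n&&\nMORE"
    #     fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n',
    #                   "", fcst)
    #     assert fcst == "TEXTMORE"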
    # The method hazardBodyText creates an attribution phrase
    #
    def hazardBodyText(self, hazardList, argDict):
        bulletProd = self._bulletProd
        hazardBodyPhrase = ''

        # First, sort the hazards for this segment by significance
        sortedHazardList = []
        for each in ['W', 'Y', 'A', 'O', 'S']:
            for eachHazard in hazardList:
                if eachHazard['sig'] == each:
                    if eachHazard not in sortedHazardList:
                        sortedHazardList.append(eachHazard)

        # Next, break them into individual lists based on action
        newList = []
        canList = []
        expList = []
        extList = []
        conList = []
        upgList = []
        statementList = []
        for eachHazard in sortedHazardList:
            if eachHazard['sig'] in ['S'] and \
               eachHazard['phen'] in ['CF', 'LS']:
                statementList.append(eachHazard)
            elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
                newList.append(eachHazard)
            elif eachHazard['act'] in ['CAN']:
                canList.append(eachHazard)
            elif eachHazard['act'] in ['EXP']:
                expList.append(eachHazard)
            elif eachHazard['act'] in ['EXT']:
                extList.append(eachHazard)
            elif eachHazard['act'] in ['UPG']:
                upgList.append(eachHazard)
            else:
                conList.append(eachHazard)

        # Now, go through each list and build the phrases. Hazards with an
        # empty 'hdln' entry are skipped (no defined headline, skip phrase).
        # Condensed, the attribution wording per action is:
        #
        #   NEW/EXA/EXB: "The National Weather Service in <wfoCity> has
        #       issued <hazard>, which is in effect <endTimePhrase>."
        #       Each following headline reads "<Hazard> has also been
        #       issued. This <hazard> is in effect <endTimePhrase>."
        #       For 'HU', 'TR' and 'TY' phenomena the shortened
        #       "In addition, <hazard>." form is used instead and no
        #       ending time phrase is added.
        #   CAN: "The <hazard> has been cancelled. "
        #   EXP: "The <hazard> is no longer in effect. " once the end
        #       time has passed, otherwise "The <hazard> will expire
        #       <timeWords>. " using getTimingPhrase against the current
        #       creation time.
        #   EXT: "The <hazard> is now in effect <endTimePhrase>. "
        #   CON: "<Hazard> remains in effect <endTimePhrase>. " (skipped
        #       for bullet products -- no attribution for this case)
        #   Statements (CF.S, LS.S):
        #       "...|* Add statement headline *|...\n\n"
        #
        # Headline names come from self.hazardName(eachHazard['hdln'],
        # argDict, ...) and end-time phrases from self.hazardTimePhrases.

        # This adds segment text
        segmentText = ''

        # Check that this segment codes to determine capture or not,
        # and frame captured text or not. Note capture is only performed
        # if the segment is 'NEW' or if the previous text has been
        # discarded due to a CAN/EXP/UPG segment.
        incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
            self.useCaptureText(sortedHazardList)

        # Check that the previous text exists
        foundCTAs = []
        for eachHazard in sortedHazardList:
            if eachHazard.has_key('prevText'):
                prevText = eachHazard['prevText']
                if eachHazard['pil'] == 'MWS':
                    startPara = 0
                else:
                    startPara = 1
                segmentText, foundCTAs = self.cleanCapturedText(
                    prevText, startPara,
                    addFramingCodes=False, skipCTAs=skipCTAs)

        # Check that the segment text isn't very short or blank
        if len(segmentText) < 6:
            incTextFlag = 0

        # DR 21309 code addition from Middendorf (BYZ):
        # if there is a new hazard and previous segment text, then
        # we may have to add bullets; if there is a can/exp hazard and
        # previous segment text, then we may have to remove bullets.
        if incTextFlag and bulletProd:
            # First make list of bullets that we need to keep.
            keepBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] not in ["CAN", "EXP"]:
                    saveBullets = string.split(
                        self._bulletDict().get(eachHazard['phen']), ",")
                    for saveBullet in saveBullets:
                        if saveBullet not in keepBulletList:
                            keepBulletList.append(saveBullet)
            # Now determine which bullets we have to remove.
            removeBulletList = []
            for eachHazard in sortedHazardList:
                if eachHazard['act'] in ["CAN", "EXP"]:
                    canBullets = string.split(
                        self._bulletDict().get(eachHazard['phen']), ",")
                    for canBullet in canBullets:
                        if canBullet not in keepBulletList and \
                           canBullet not in removeBulletList:
                            removeBulletList.append(canBullet)
            # Finally remove the bullets no longer needed: the segment is
            # split on "* <BULLET>..." headers (case-insensitive) and
            # rejoined without them. New bullets are inserted in the order
            # given by self._bulletOrder() as
            #   "* <BULLET>...|* Enter bullet text *|\n\n"

        # If segment passes the above checks, add the text
        if incTextFlag:
            hazardBodyPhrase = hazardBodyPhrase + "\n\n" + segmentText
        # If segment doesn't pass the checks, put in framing codes, the
        # default bullets
        #   "* <BULLET>...|* Enter bullet text *|\n\nPRECAUTIONARY/PREPAREDNESS ACTIONS..."
        # for bullet products, or
        #   "\n\n|* Statement text goes here *|.\n\n"
        # for statements.

        # Begin code for DR 21310: this adds the call to action text.
        # remove items from forceCTAList if they exist in foundCTAs. Note
        # that the formats of these lists are different, thus this code
        # is more complicated
        for ent in foundCTAs:
            if ent.find('.') == 2:
                phensig = (ent[0:2], ent[3])   #phen.sig
                if phensig in forceCTAList:
                    del forceCTAList[forceCTAList.index(phensig)]

        ctas = []
        for (phen, sig) in forceCTAList:
            hazardPhenSig = phen + "." + sig
            cta = self.defaultCTA(hazardPhenSig)
            if cta not in ctas:
                ctas.append(cta)
        if len(ctas) > 0:
            hazardBodyPhrase = hazardBodyPhrase + \
                'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
            for c in ctas:
                hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
            hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'

        # Make sure there is only one CAP tag pair (a final
        # re.sub(..., "", hazardBodyPhrase) collapses any duplicate)
        return hazardBodyPhrase

    def finalOverviewText(self):
        #if didn't calculate overview, use the default
        if len(self.__overviewText) == 0:
            if self._includeOverviewHeadline:
                overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
            else:
                overviewHeadline = ""
            if self._includeOverview:
                overviewBody = ".|*Overview (must edit)*|.\n\n"
            else:
                overviewBody = ""
            #assemble the lines
            overview = overviewHeadline + overviewBody
            return overview
        else:
            return self.__overviewText

    def overviewText(self, hazardList, pil):
        #
        # This method finds an overview in the previous product
        #
        overview = ""
        for each in hazardList:
            if (each.has_key('prevOverviewText') and
                    each.has_key('pil') and
                    each.has_key('endTime') and
                    each.has_key('act')):
                if (each['pil'] == pil and
                        each['endTime'] > self._currentTime and
                        each['act'] not in ['CAN', 'EXP']):
                    overview = each['prevOverviewText']
                    self.__overviewText, dummy = self.cleanCapturedText(
                        overview, 0)
                    break

    def useCaptureText(self, hazardList):
        #Based on the hazardlist, returns a tuple indicating:
        # (inc capture text, inc framing codes, skip CTAs, forceCTAList)
        #
        # For the values to be considered, the 'hdln' value must be
        # present in the list, or it needs to ...
        cans = ['CAN', 'UPG', 'EXP']
        acts = ['NEW', 'EXT', 'EXA', 'EXB', 'CON']
        foundACTS = 0
        foundCANS = 0
        foundSig = []
        for eh in hazardList:
            if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
                foundACTS = 1
            if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
                foundCANS = 1
            if eh['sig'] not in foundSig:
                foundSig.append(eh['sig'])

        includeFrameCodes = 0
        includeText = 1
        skipCTAs = 0
        forceCTAList = []

        # all actions are in CAN, UPG, EXP only (don't include text)
        if foundCANS and not foundACTS:
            if 'S' in foundSig and len(foundSig) == 1:   #only S
                includeFrameCodes = 1   #capture text, but frame it
            else:
                includeText = 0   #end of non statement

        # something in CANS and something in acts (frame it, include text)
        elif foundCANS and foundACTS:
            includeFrameCodes = 1
            skipCTAs = 1
            for eh in hazardList:
                if eh['act'] in acts and \
                   (eh['phen'], eh['sig']) not in forceCTAList and \
                   len(eh['hdln']):
                    forceCTAList.append((eh['phen'], eh['sig']))

        #everything in active entries, captured text is used, but still
        # need to handle the "NEW" entries.
        else:
            for eh in hazardList:
                if eh['act'] in acts and \
                   (eh['phen'], eh['sig']) not in forceCTAList and \
                   len(eh['hdln']):
                    forceCTAList.append((eh['phen'], eh['sig']))

        return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
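    # Aside (illustrative, not from the source): the tuple returned by
    # useCaptureText drives hazardBodyText roughly as follows --
    #
    #     incText, incFrame, skipCTAs, forceCTAList = \
    #         self.useCaptureText(sortedHazardList)
    #
    #     all in CAN/UPG/EXP, only 'S' sigs -> capture text, but frame it
    #     all in CAN/UPG/EXP otherwise      -> drop the captured text
    #     CAN/UPG/EXP mixed with actives    -> capture, frame, skip CTAs,
    #                                          force CTAs for each active
    #                                          (phen, sig) headline
    #     only active entries               -> captured text used as-is,
    #                                          CTAs forced for the "NEW"
    #                                          entries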
    def cleanCapturedText(self, text, paragraphs, addFramingCodes=False,
                          skipCTAs=False):
        # This method takes a block of text, wraps it preserving blank
        # lines, then returns the part after 'paragraphs'. So, if
        # paragraphs is 0, it returns the whole thing, if it's 2, it
        # returns paragraphs 2 -> end, etc. Headlines are ignored;
        # framing codes and CTA skipping are applied on request.
        paras = self.convertSingleParas(text)

        # keep track of any call to actions found
        foundCTAs = []

        # Process the paragraphs, keep only the interested ones
        paraCount = 0
        processedText = ''
        for eachPara in paras:
            if paraCount >= paragraphs:
                found = self.ctasFound(eachPara)  #get list of ctas found
                if skipCTAs and len(found):
                    pass
                else:
                    processedText = processedText + eachPara + '\n\n'
                    #keep track of remaining CTAs in processed text
                    for f in found:
                        if f not in foundCTAs:
                            foundCTAs.append(f)
            if eachPara.find('...') == 0:
                pass   #ignore headlines
            paraCount = paraCount + 1

        # Add framing codes
        if addFramingCodes:
            processedText = processedText.rstrip()
            processedText = "|*\n" + processedText + "*|\n"

        # Wrap
        processedText = self.endline(processedText,
                                     linelength=self._lineLength,
                                     breakStr=[" ", "-", "..."])
        return processedText, foundCTAs

    def convertSingleParas(self, text):
        #returns a list of paragraphs based on the input text.
        lf = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
        ptext = lf.sub(r'\1 \2', text)
        ptext = ptext.replace('\n\n', '\n')
        paragraphs = ptext.split('\n')
        return paragraphs

    def ctasFound(self, text):
        #returns types of ctas found. The identifier is the pil (e.g.,
        #ZFP), phen/sig (e.g., DU.Y), or GENERIC. Uses the CallToAction
        #definitions.

        #convert text to single paragraphs
        paragraphs = self.convertSingleParas(text)
        for x in xrange(len(paragraphs)):
            paragraphs[x] = string.replace(paragraphs[x], ' ', '')

        #make a list of call to actions (type, cta text)
        if self.__procCTA is None:
            self.__procCTA = []
            ctao = CallToActions.CallToActions()

            # pil-specific CTAs (the phen/sig CTA dictionary is handled
            # likewise)
            d = ctao.ctaPilDict()
            for k in d.keys():
                func = d[k]
                items = func()
                for it in items:
                    if type(it) == types.TupleType:
                        it = it[1]   #get second string which is the CTA
                    ctaParas = self.convertSingleParas(it)
                    for cta in ctaParas:
                        self.__procCTA.append((k,
                                               string.replace(cta, ' ', '')))

            # generic CTAs
            ctas = ctao.genericCTAs()
            for it in ctas:
                if type(it) == types.TupleType:
                    it = it[1]
                ctaParas = self.convertSingleParas(it)
                for cta in ctaParas:
                    self.__procCTA.append(("GENERIC",
                                           string.replace(cta, ' ', '')))

        #compare
        found = []
        for para in paragraphs:
            for (ctaType, cta) in self.__procCTA:
                ## Added following line to account for framing code issues
                ## in CTA
                cta = re.sub("\|\*.*\*\|", "", cta)
                # We want this comparison to be case-insensitive just in
                # case the site is not transmitting in mixed case yet.
                if para.upper() == cta.upper() and ctaType not in found:
                    found.append(ctaType)
        return found

    def decodeBulletedText(self, prevText):
        # returns the bullet paragraph text or None, returns the
        # regular text after the bullets. The afterText is text up to
        # the next bullet or up to "The National Weather Service". Note
        # that this only correctly handles the 1st set of entries in
        # a segment, thus double events will only decode the first set
        # of bullets and text. The multipleRecords is set to 1 in the
        # event that there are multiple sets of bullets. In this case
        # only the 1st set is decoded.
        # Returns (hazard, time, basis, impact, afterText, multipleRecords)
        if prevText is None:
            return (None, None, None, None, None, None)

        # find the bullets
        bullets = []
        buf = prevText.split('\n\n* ')
        if len(buf) <= 1:
            return (None, None, None, None, None, None)

        multRecords = 0   #indicator of multiple sets of bullets

        for x in xrange(len(buf)):
            if x == 0:
                continue   #headlines and text before the bullets
            bullets.append(buf[x])

        # regular text is the remainder of the text
        regText = ""   #regular text after bullets
        for x in xrange(1, len(bullets)):
            index = bullets[x].find('\n\n')
            if index != -1:
                regText = bullets[x][index+2:]
                bullets[x] = bullets[x][0:index]  #eliminate after bullet text
                if len(bullets) > x+2:   #more bullets are present
                    multRecords = 1
                bullets = bullets[0:x+1]  #only interested in these bullets
                break

        # clean up the bullets and the regular text
        for x in xrange(len(bullets)):
            bullets[x] = string.replace(bullets[x], '\n', ' ')
        removeLF = re.compile(r'(s*[^\n])\n([^\n])', re.DOTALL)
        regText = removeLF.sub(r'\1 \2', regText)

        # extract out each section for returning the values
        if len(bullets) >= 1:
            hazard = bullets[0]
        else:
            hazard = None
        if len(bullets) >= 2:
            time = bullets[1]
        else:
            time = None
        if len(bullets) >= 3:
            basis = bullets[2]
        else:
            basis = None
        if len(bullets) >= 4:
            impact = bullets[3]
        else:
            impact = None

        # regular text is only valid up to the
        # beginning of any next NWS phrase.
        lines = regText.split('\n')
        for x in xrange(len(lines)):
            if lines[x].find('The National Weather Service') == 0:
                lines = lines[0:x]
                break
        regText = ("\n").join(lines)

        # now clean up the text
        regText = regText.strip()
        if len(regText) == 0:
            regText = None   #no regular text after bullets

        return (hazard, time, basis, impact, regText, multRecords)

    def substituteBulletedText(self, capText, defaultText, frameit):
        #Returns a properly formatted bulleted text based on
        #the capText variable. If capText is None or 0 length, then
        #the default text is used. frameit can be "Never", in which
        #nothing is wrapped in framing codes, "Always" in which the
        #text (default or cap) is wrapped in framing codes, or
        #"DefaultOnly" in which just the default text is wrapped.
        if capText is not None and len(capText):
            textToUse = capText[0].upper() + capText[1:]
            if frameit == "Always":
                textToUse = "|* " + textToUse + " *|"
        else:
            textToUse = defaultText
            if frameit == "Always" or frameit == "DefaultOnly":
                textToUse = "|* " + textToUse + " *|"

        # add bullet codes
        textToUse = "* " + textToUse

        # format it
        return self.indentText(textToUse, indentFirstString='',
                               indentNextString='  ',
                               maxWidth=self._lineLength,
                               breakStrings=[" ", "-", "..."])
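    # Aside (illustrative, not from the source): substituteBulletedText
    # falls back to the default when nothing was captured, and framing
    # depends on the frameit mode:
    #
    #     self.substituteBulletedText(None, "Enter bullet text",
    #                                 "DefaultOnly")
    #         -> bullet "* |* Enter bullet text *|" (then indented/wrapped)
    #     self.substituteBulletedText("heavy snow expected", "x", "Never")
    #         -> bullet "* Heavy snow expected" (capitalized, no framing)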
# ===========================================================================
# Buried-area (total) analysis of virtual-screening poses with PySpark
# and GROMACS (drugdesign project)
# ===========================================================================

from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
from subprocess import Popen, PIPE
import ConfigParser as configparser
import os, sys
from os_utils import preparing_path, get_directory_pdb_analysis
from gromacs_utils import get_value_from_xvg_sasa
from pdb_io import replace_chain_atom_line, loading_pdb_2_list, \
    get_name_model_pdb, get_name_receptor_pdb, get_files_pdb, \
    get_files_pdb_filter
from database_io import load_database


def save_receptor_buried_area(path_file_buried_area,
                              buried_area_sorted_by_lig_rec_perc):
    f_buried_area = open(path_file_buried_area, "w")
    for area in buried_area_sorted_by_lig_rec_perc:
        pose = area[0]
        buried_total = "{:.4f}".format(area[1])
        line = pose + "\t" + str(buried_total) + "\n"
        f_buried_area.write(line)
    f_buried_area.close()


def save_buried_area(path_file_buried_area,
                     buried_area_sorted_by_buried_total):
    f_buried_area = open(path_file_buried_area, "w")
    line = "# buried_area_total[nm2]\tpose" + "\n"
    f_buried_area.write(line)
    for area in buried_area_sorted_by_buried_total:
        pose = area[0]
        buried_total = "{:.4f}".format(area[1])
        line = str(buried_total) + "\t" + str(pose) + "\n"
        f_buried_area.write(line)
    f_buried_area.close()


def save_normalized_buried_area(path_file_buried_area, sorted_normalized):
    f_buried_area = open(path_file_buried_area, "w")
    for area in sorted_normalized:
        pose = area[0]
        normalized_buried_total = "{:.4f}".format(area[1])
        line = str(normalized_buried_total) + "\t" + str(pose) + "\n"
        f_buried_area.write(line)
    f_buried_area.close()


def loading_lines_from_area_files(line):
    line_splited = str(line).split()
    #line_ret keeps the whole record; the downstream Row() mapping picks
    #the pose (p[0]) and the buried-area fields (p[3], p[4], p[5])
    line_ret = tuple(line_splited)
    return line_ret


def get_files_area(mypath):
    only_mol2_file = []
    for root, dirs, files in os.walk(mypath):
        for file in files:
            if file.endswith(".area"):
                f_path = os.path.join(root, file)
                only_mol2_file.append(f_path)
    return only_mol2_file


def main():

    config = configparser.ConfigParser()
    config.read('config.ini')

    #Path for Gromacs project
    gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
    #Path for files of models generated by VS for analysis
    path_analysis = config.get('DEFAULT', 'path_analysis')
    #Ligand Database file
    ligand_database = config.get('DEFAULT', 'ligand_database')
    #Path where receptor pdb files are
    path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
    #Path of pdb files of models generated by VS
    path_analysis_pdb = get_directory_pdb_analysis(path_analysis)
    #Path for the prepared-ligand pdb files
    pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path')

    # Create SPARK config
    maxResultSize = str(config.get('SPARK', 'maxResultSize'))
    conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))
    sc = SparkContext(conf=conf)
    sqlCtx = SQLContext(sc)

    #Adding Python Source file
    #Path for drugdesign project
    path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
    sc.addPyFile(os.path.join(path_spark_drugdesign, "os_utils.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "gromacs_utils.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "pdb_io.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "database_io.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "json_utils.py"))

    #Adding bash scripts
    sc.addFile(os.path.join(path_spark_drugdesign,
                            "make_ndx_buried_area_total.sh"))
    sc.addFile(os.path.join(path_spark_drugdesign,
                            "make_sasa_rec_buried_area_total.sh"))

    #Parameters from command line
    #Indicates probe.
    probe = float(sys.argv[1])
    #Indicates ndots. Example: 24
    ndots = int(sys.argv[2])

    #Broadcast
    path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb)
    gromacs_path = sc.broadcast(gromacs_path)
    pdb_ligand_path = sc.broadcast(pdb_ligand_path)
    probe = sc.broadcast(probe)
    ndots = sc.broadcast(ndots)

    #Loading database
    rdd_database = load_database(sc, ligand_database)
    #Creating Dataframe
    database_table = sqlCtx.createDataFrame(rdd_database)
    database_table.registerTempTable("database")

    #Loading all PDB receptor files into memory
    list_all_pdb_receptor_files_path = []
    all_receptor_for_complex = get_files_pdb(path_receptor_pdb)
    for receptor in all_receptor_for_complex:
        list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor))

    for pdb_receptor_files in list_all_pdb_receptor_files_path:
        #Getting receptor name by its full path
        base_file_name_receptor = \
            get_name_receptor_pdb(str(pdb_receptor_files[0]))
        #PDB file loaded into memory is sent by broadcast
        pdb_file_receptor = pdb_receptor_files[1]
        pdb_file_receptor = sc.broadcast(pdb_file_receptor)
        #Loading PDB model files based on receptor into memory
        base_file_name_receptor_for_filter = base_file_name_receptor + "_-_"
        all_model_for_complex = get_files_pdb_filter(
            path_analysis_pdb, base_file_name_receptor_for_filter)
        all_model_for_complexRDD = sc.parallelize(all_model_for_complex)
        all_model_filesRDD = \
            all_model_for_complexRDD.map(loading_pdb_2_list).collect()

        # ********** Starting function **********************************
        def save_model_receptor(list_receptor_model_file):
            receptor_file = pdb_file_receptor.value  #Obtained from broadcast
            model_file = list_receptor_model_file[0]
            full_path_for_save_complex = list_receptor_model_file[1]
            f_compl = open(full_path_for_save_complex, "w")
            #Insert lines of receptor
            for item in receptor_file:
                f_compl.write(item)
            #Insert lines of model and insert Z chain
            for item in model_file:
                item = replace_chain_atom_line(item, "d", "z")
                f_compl.write(item)
            f_compl.close()
        # ********** Finish function ************************************

        # ********** Starting function **********************************
        def compute_buried_area(pdb_complex):
            chZ = "chZ"
            sasa_complex = -1.0
            sasa_rec = -1.0
            sasa_lig = -1.0
            buried_total = -1.0
            returned_list = []
            try:
                base_name = get_name_model_pdb(pdb_complex)
                f_ndx = os.path.join(path_analysis_pdb_complex_b.value,
                                     base_name + ".ndx")
                f_temp_sasa_complex = os.path.join(
                    path_analysis_pdb_complex_b.value,
                    base_name + "_sasa_complex.xvg")
                f_temp_sasa_rec = os.path.join(
                    path_analysis_pdb_complex_b.value,
                    base_name + "_sasa_rec.xvg")
                f_temp_sasa_lig = os.path.join(
                    path_analysis_pdb_complex_b.value,
                    base_name + "_sasa_lig.xvg")

                # Makes the index file with the ligand (chain z) and
                # the rest (non chain z)
                script_make_ndx = \
                    SparkFiles.get("make_ndx_buried_area_total.sh")
                command = script_make_ndx + " " + gromacs_path.value + \
                    " " + pdb_complex + " " + f_ndx
                process = Popen(command, shell=True,
                                stdout=PIPE, stderr=PIPE)
                stdout, stderr = process.communicate()

                # SASA of the whole complex
                command = gromacs_path.value + "gmx sasa -f " + \
                    pdb_complex + " -s " + pdb_complex + " -nopbc " + \
                    " -n " + f_ndx + " -surface System " + \
                    " -o " + f_temp_sasa_complex + \
                    " -probe " + str(probe.value) + \
                    " -ndots " + str(ndots.value)
                process = Popen(command, shell=True,
                                stdout=PIPE, stderr=PIPE)
                stdout, stderr = process.communicate()
                sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex)

                # SASA of the receptor (through
                # make_sasa_rec_buried_area_total.sh) and SASA of the
                # ligand (chZ group, written to f_temp_sasa_lig) are
                # obtained the same way and read back with
                # get_value_from_xvg_sasa.

                #Buried total area
                buried_total = sasa_rec + sasa_lig - sasa_complex

                # Sorting is based on this pair, because the resulting
                # file will be created based on this sorting
                returned_list = (base_name, buried_total)
            except:
                returned_list = (base_name, float(0))

            #Deleting files
            if os.path.exists(f_ndx):
                os.remove(f_ndx)
            if os.path.exists(f_temp_sasa_complex):
                os.remove(f_temp_sasa_complex)
            if os.path.exists(f_temp_sasa_rec):
                os.remove(f_temp_sasa_rec)
            if os.path.exists(f_temp_sasa_lig):
                os.remove(f_temp_sasa_lig)

            return returned_list
        # ********** Finish function ************************************

        all_model_filesRDD = sc.parallelize(all_model_filesRDD)
        all_model_filesRDD = \
            all_model_filesRDD.map(compute_buried_area).collect()

        #Saving the per-receptor .area file
        full_area_file = os.path.join(path_analysis,
                                      base_file_name_receptor + ".area")
        save_receptor_buried_area(full_area_file, all_model_filesRDD)

    #Loading all area files
    all_area_file = os.path.join(path_analysis, "*.area")
    buried_areaRDD = \
        sc.textFile(all_area_file).map(loading_lines_from_area_files)
    buried_areaRDD = buried_areaRDD.map(
        lambda p: Row(pose=str(p[0]),
                      buried_total=float(p[1]),
                      buried_lig_rec=float(p[3]),
                      buried_lig_rec_perc=float(p[4]),
                      buried_lig_lig_perc=float(p[5])))
    buried_area_table = sqlCtx.createDataFrame(buried_areaRDD)
    buried_area_table.registerTempTable("buried_area_total_sort")

    #Sorting by buried total area
    buried_area_sorted_by_buried_total = sqlCtx.sql(
        "SELECT * FROM buried_area_total_sort ORDER BY buried_total DESC")
    buried_area_sorted_by_buried_total.cache()
    buried_area_sorted_by_buried_total_LIST = \
        buried_area_sorted_by_buried_total.map(
            lambda p: (p.pose, p.buried_total)).collect()

    #Saving buried area file
    path_file_buried_area = os.path.join(path_analysis,
                                         "summary_buried_areas_total.dat")
    save_buried_area(path_file_buried_area,
                     buried_area_sorted_by_buried_total_LIST)

    #Calculating normalized buried area
    sql = """SELECT ... as normalized_buried_area
             FROM database a
             JOIN buried_area_total_sort b ON b.ligand = a.ligand
             ORDER BY normalized_buried_area DESC"""
    buried_area_sorted_by_normalized = sqlCtx.sql(sql)

    #The run is logged around the pipeline with, e.g.
    #   msg = 'Starting ' + str(start_time) + '\n'
    #   log_file.write(msg)
    #   ...
    #   msg = 'Finishing ' + str(finish_time) + '\n'
    #   log_file.write(msg)


if __name__ == '__main__':
    main()
Example: 0.14 probe", "import os, sys from os_utils import preparing_path from gromacs_utils import", "'path_analysis') #Ligand Database file ligand_database = config.get('DEFAULT', 'ligand_database_path_file') #Path where", "process.communicate() sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex) sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec) sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig)", "file.endswith(\".area\"): f_path = os.path.join(root,file) only_mol2_file.append(f_path) return only_mol2_file def save_log(finish_time, start_time):", "file path_file_buried_area = os.path.join(path_analysis, \"summary_normalized_buried_areas.dat\") save_normalized_buried_area(path_file_buried_area, full_dataRDD) #Removing all area", "buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5]) ) )", "#Ligand Database file ligand_database = config.get('DEFAULT', 'ligand_database_path_file') #Path where all", "from os_utils import preparing_path from gromacs_utils import get_value_from_xvg_sasa from pdb_io", "gromacs_path = sc.broadcast(gromacs_path) pdb_ligand_path = sc.broadcast(pdb_ligand_path) probe = sc.broadcast(probe) ndots", "file with the ligand (chain z) and the rest (non", "in list_all_pdb_receptor_files_path: #Getting receptor name by fully path base_file_name_receptor =", "Finish function ********************************************************** # ********** Starting function ********************************************************** def save_model_receptor(list_receptor_model_file):", "the complex f_compl = open(full_path_for_save_complex, \"w\") #Insert lines of receptor", "line = \"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in full_dataRDD.collect(): pose", "command command = script_make_ndx + \" \" + gromacs_path.value +", "= os.path.join(path_analysis, \"summary_normalized_buried_areas.dat\") save_normalized_buried_area(path_file_buried_area, full_dataRDD) #Removing all area files all_area_files", "#Removing all area files all_area_files = get_files_area(path_analysis) for area_file in", "if file.endswith(\".area\"): f_path = os.path.join(root,file) only_mol2_file.append(f_path) return only_mol2_file def save_log(finish_time,", "+ \" \"+ pdb_complex + \" \"+ f_ndx process =", "str(line_splited[0]), str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) ) line_ret = (", "= sc.broadcast(ndots) start_time = datetime.now() os.environ[\"GMX_MAXBACKUP\"]=\"-1\" #Loading all PDB receptor", "all_receptor_for_complex = get_files_pdb(path_receptor_pdb) for receptor in all_receptor_for_complex: list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor)) #Computing Buried", "lines of model and insert Z chain for item in", "import SparkContext, SparkConf, SparkFiles from pyspark.sql import SQLContext, Row import", "sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec) sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig) buried_total = sasa_rec +", "buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1]) ) ) buried_area_table = sqlCtx.createDataFrame(buried_areaRDD) buried_area_table.registerTempTable(\"buried_area\")", "files into memory list_all_pdb_receptor_files_path = [] all_receptor_for_complex = get_files_pdb(path_receptor_pdb) for", "+\"gmx sasa -f \" + pdb_complex + \" -s \"", "\" + pdb_complex + \" -s \" + pdb_complex +", 
"PIPE from datetime import datetime from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb,", "[] for root, dirs, files in os.walk(mypath): for file in", "= -1.0 returned_list = [] try: base_name = get_name_model_pdb(pdb_complex) ligand_name", "= open(path_file_buried_area,\"w\") line = \"# buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in", "f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_lig.xvg\") # Makes the index file with the", "= Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() sasa_complex =", "= model[1] path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from broadcast #Building complex", "get_name_model_pdb(pdb_complex) ligand_name = get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\")", "full_dataRDD): f_buried_area = open(path_file_buried_area,\"w\") line = \"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for", "path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from broadcast #Building complex file based", "\"\"\" SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area FROM database", "line = str(normalized_buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def loading_lines_from_area_files(line): line_splited = str(line).split()", "f_buried_area.close() def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area = open(path_file_buried_area,\"w\") line = \"#", "Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect() number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD) number_pose_ligand_table.registerTempTable(\"buried_area_total_sort\") sql", "into memory is sent by broadcast pdb_file_receptor = pdb_receptor_files[1] pdb_file_receptor", "-1.0 sasa_lig = -1.0 buried_total = -1.0 returned_list = []", "sasa_rec = -1.0 sasa_lig = -1.0 buried_total = -1.0 returned_list", "sorting returned_list = (base_name, buried_total) except: returned_list = (base_name, float(0))", "complex f_compl = open(full_path_for_save_complex, \"w\") #Insert lines of receptor for", "FROM database a JOIN buried_area_total_sort b ON b.ligand = a.ligand", "process.communicate() # Makes f_temp_sasa_rec file script_make_sasa_rec = SparkFiles.get(\"make_sasa_rec_buried_area_total.sh\") #Getting bash", "#receptor = str(str(aux_recep).replace(\"compl_\", \" \")).strip() #preparing ligand #splited_aux_lig = str(aux_lig).split(get_separator_filename_mode())", "config maxResultSize = str(config.get('SPARK', 'maxResultSize')) conf = (SparkConf().set(\"spark.driver.maxResultSize\", maxResultSize)) #", "NOT participated in docking pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path') #Path that", ").collect() number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD) number_pose_ligand_table.registerTempTable(\"buried_area_total_sort\") sql = \"\"\" SELECT pose,", "file based on model file name base_name_model = get_name_model_pdb(full_path_model) complex_name", "name base_name_model = get_name_model_pdb(full_path_model) complex_name = \"compl_\"+base_name_model+\".pdb\" full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name)", "path_file_buried_area = os.path.join(path_analysis, 
\"summary_buried_areas_total.dat\") save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST) #Calculating normalized buried area", "from broadcast model_file = list_receptor_model_file[0] full_path_for_save_complex = list_receptor_model_file[1] #Open file", "+ \" -s \" + pdb_complex + \" -nopbc \"", "index file with the ligand (chain z) and the rest", "\"summary_buried_areas_total.dat\") save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST) #Calculating normalized buried area #Loading database rdd_database", "pyspark import SparkContext, SparkConf, SparkFiles from pyspark.sql import SQLContext, Row", "= int(sys.argv[2]) #Broadcast path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb) gromacs_path = sc.broadcast(gromacs_path) pdb_ligand_path", "str(buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_normalized_buried_area(path_file_buried_area, full_dataRDD): f_buried_area = open(path_file_buried_area,\"w\") line", "msg = 'Starting ' + str(start_time) +'\\n' log_file.write(msg) msg =", "analysis path_analysis = config.get('DEFAULT', 'path_analysis') #Ligand Database file ligand_database =", "file loaded into memory is sent by broadcast pdb_file_receptor =", "script_make_ndx = SparkFiles.get(\"make_ndx_buried_area_total.sh\") #Getting bash script that was copied by", "= str(splited_line[1]) #preparing receptor #receptor = str(str(aux_recep).replace(\"compl_\", \" \")).strip() #preparing", "-surface System \" + \" -output System \"+ \" -xvg", "buried_area_sorted_by_buried_total_LIST) #Calculating normalized buried area #Loading database rdd_database = load_database(sc,", "# Makes the index file with the ligand (chain z)", "os.path.join(current_path, log_file_name) log_file = open(path_file, 'w') diff_time = finish_time -", "broadcast pdb_file_receptor = pdb_receptor_files[1] pdb_file_receptor = sc.broadcast(pdb_file_receptor) #Loading PDB model", "configparser.ConfigParser() config.read('config.ini') #Path for Gromacs project gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))", "line #Indicates probe. 
Example: 0.14 probe = float(sys.argv[1]) #Indicates ndots.", "only_mol2_file = [] for root, dirs, files in os.walk(mypath): for", "all_receptor_for_complex: list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor)) #Computing Buried areas for pdb_receptor_files in list_all_pdb_receptor_files_path: #Getting", "receptor are path_receptor_pdb = config.get('DEFAULT', 'pdb_path') #Path for saving pdb", ") buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1]) ) ) buried_area_table", "Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() # Makes f_temp_sasa_rec", "\" -o \" + f_temp_sasa_lig process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE)", "area in buried_area_sorted_by_lig_rec_perc: #splited_line = area[0].split(\"_-_\") #aux_recep = splited_line[0] #aux_lig", "SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area FROM database a", "area files all_area_files = get_files_area(path_analysis) for area_file in all_area_files: os.remove(area_file)", "sc.addPyFile(os.path.join(path_spark_drugdesign,\"json_utils.py\")) #Adding bash scripts sc.addFile(os.path.join(path_spark_drugdesign,\"make_ndx_buried_area_total.sh\")) sc.addFile(os.path.join(path_spark_drugdesign,\"make_sasa_rec_buried_area_total.sh\")) #Parameters form command line", "#Parameters form command line #Indicates probe. Example: 0.14 probe =", "os.environ[\"GMX_MAXBACKUP\"]=\"-1\" #Loading all PDB receptor files into memory list_all_pdb_receptor_files_path =", "line_ret = ( str(line_splited[0]), float(line_splited[1]) ) return line_ret def get_files_area(mypath):", "files of models generated by VS path_analysis_pdb = get_directory_pdb_analysis(path_analysis) #", "splited_aux_lig[0] #model = splited_aux_lig[1] pose = area[0] buried_total = \"{:.4f}\".format(area[1])", "\" -xvg none \" + \" -o \" + f_temp_sasa_complex", "finish_time - start_time msg = 'Starting ' + str(start_time) +'\\n'", "will be created based on this sorting returned_list = (base_name,", "save_normalized_buried_area(path_file_buried_area, full_dataRDD): f_buried_area = open(path_file_buried_area,\"w\") line = \"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line)", "= [] for root, dirs, files in os.walk(mypath): for file", "#Obtained from broadcast #Building complex file based on model file", "System \" + \" -output System \"+ \" -xvg none", "sc.broadcast(path_analysis_pdb) gromacs_path = sc.broadcast(gromacs_path) pdb_ligand_path = sc.broadcast(pdb_ligand_path) probe = sc.broadcast(probe)", "f_buried_area = open(path_file_buried_area,\"w\") for area in buried_area_sorted_by_lig_rec_perc: #splited_line = area[0].split(\"_-_\")", "DESC\") #buried_lig_lig_perc return buried_area_sorted_by_buried_total def save_receptor_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area = open(path_file_buried_area,\"w\")", "#preparing ligand #splited_aux_lig = str(aux_lig).split(get_separator_filename_mode()) #ligand = splited_aux_lig[0] #model =", "buried_area_sorted_by_lig_rec_perc: #receptor = area[0] #ligand = area[1] #model = area[2]", "f_path = os.path.join(root,file) only_mol2_file.append(f_path) return only_mol2_file def save_log(finish_time, start_time): log_file_name", "'w') diff_time = finish_time - start_time msg = 'Starting '", "= sc.broadcast(probe) ndots = sc.broadcast(ndots) start_time = datetime.now() os.environ[\"GMX_MAXBACKUP\"]=\"-1\" #Loading", "all area file all_area_file = 
os.path.join(path_analysis,\"*.area\") buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect() #Sorting", "'pdb_path') #Path for saving pdb files of models generated by", "msg = 'Finishing ' + str(finish_time) +'\\n' log_file.write(msg) msg =", "for item in receptor_file: f_compl.write(item) #Insert lines of model and", "= \"{:.4f}\".format(area[1]) #line = receptor+\"\\t\"+ligand+\"\\t\"+model+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\" line = pose+\"\\t\"+str(buried_total)+\"\\n\" f_buried_area.write(line) f_buried_area.close()", "line = str(buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_normalized_buried_area(path_file_buried_area, full_dataRDD): f_buried_area =", "ligand_name = get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\") f_temp_sasa_complex", "'vs_buried_areas.log' current_path = os.getcwd() path_file = os.path.join(current_path, log_file_name) log_file =", "get_files_pdb(path_receptor_pdb) for receptor in all_receptor_for_complex: list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor)) #Computing Buried areas for", "from gromacs_utils import get_value_from_xvg_sasa from pdb_io import replace_chain_atom_line from database_io", "open(path_file_buried_area,\"w\") line = \"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in full_dataRDD.collect():", "ligand are - They are NOT participated in docking pdb_ligand_path", "= list_receptor_model_file[0] full_path_for_save_complex = list_receptor_model_file[1] #Open file for writting the", "areas for pdb_receptor_files in list_all_pdb_receptor_files_path: #Getting receptor name by fully", "os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_lig.xvg\") # Makes the index file with the ligand (chain", "pose+\"\\t\"+str(buried_total)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area = open(path_file_buried_area,\"w\") line", "probe = sc.broadcast(probe) ndots = sc.broadcast(ndots) start_time = datetime.now() os.environ[\"GMX_MAXBACKUP\"]=\"-1\"", "get_name_model_pdb(full_path_model) complex_name = \"compl_\"+base_name_model+\".pdb\" full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name) list_receptor_model_file = (model_file,", "= open(path_file, 'w') diff_time = finish_time - start_time msg =", "area file path_file_buried_area = os.path.join(path_analysis, \"summary_buried_areas_total.dat\") save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST) #Calculating normalized", "all_model_filesRDD = sc.parallelize(all_model_filesRDD) all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect() #Saving buried area of", "get_files_area(path_analysis) for area_file in all_area_files: os.remove(area_file) finish_time = datetime.now() save_log(finish_time,", "for saving pdb files of models generated by VS path_analysis_pdb", "f_compl.close() # ********** Finish function ********************************************************** # ********** Starting function", "for root, dirs, files in os.walk(mypath): for file in files:", "receptor #receptor = str(str(aux_recep).replace(\"compl_\", \" \")).strip() #preparing ligand #splited_aux_lig 
=", "= preparing_path(config.get('DRUGDESIGN', 'gromacs_path')) #Path where PDB ligand are - They", "\" + \" -n \" + f_ndx + \" -surface", "are path_receptor_pdb = config.get('DEFAULT', 'pdb_path') #Path for saving pdb files", "JOIN buried_area_total_sort b ON b.ligand = a.ligand ORDER BY normalized_buried_area", "= \"{:.4f}\".format(area[5]) #line = receptor+\"\\t\"+ligand+\"\\t\"+str(model)+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\" line = str(buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close()", "= str(buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_normalized_buried_area(path_file_buried_area, full_dataRDD): f_buried_area = open(path_file_buried_area,\"w\")", "pose, (b.buried_total / a.heavyAtom) as normalized_buried_area FROM database a JOIN", "data full_dataRDD = sqlCtx.sql(sql) #Saving normalized buried area file path_file_buried_area", "SQLContext, Row import ConfigParser as configparser from subprocess import Popen,", "preparing_path from gromacs_utils import get_value_from_xvg_sasa from pdb_io import replace_chain_atom_line from", "import get_value_from_xvg_sasa from pdb_io import replace_chain_atom_line from database_io import load_database", "= os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\") f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_complex.xvg\") f_temp_sasa_rec =", "load_database(sc, ligand_database) #Creating Dataframe database_table = sqlCtx.createDataFrame(rdd_database) database_table.registerTempTable(\"database\") number_pose_ligandRDD =", "SparkContext(conf=conf) sqlCtx = SQLContext(sc) #Adding Python Source file #Path for", "contains all files for analysis path_analysis = config.get('DEFAULT', 'path_analysis') #Ligand", "+ str(start_time) +'\\n' log_file.write(msg) msg = 'Finishing ' + str(finish_time)", "BY normalized_buried_area DESC \"\"\" #Getting all data full_dataRDD = sqlCtx.sql(sql)", "save_model_receptor(list_receptor_model_file) list_ret = compute_buried_area(full_path_for_save_complex) os.remove(full_path_for_save_complex) return list_ret # ********** Finish", "********** Starting function ********************************************************** def build_list_model_for_complex(model): full_path_model = model[0] model_file", "sasa_lig - sasa_complex #Generating result - See column sorting because", "#Indicates probe. Example: 0.14 probe = float(sys.argv[1]) #Indicates ndots. 
Example:", "= SQLContext(sc) #Adding Python Source file #Path for drugdesign project", "sorting_buried_area(sc, buried_areaRDD) buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total) ).collect()", "'pdb_ligand_path') #Path that contains all files for analysis path_analysis =", "SparkContext, SparkConf, SparkFiles from pyspark.sql import SQLContext, Row import ConfigParser", "def save_log(finish_time, start_time): log_file_name = 'vs_buried_areas.log' current_path = os.getcwd() path_file", "in full_dataRDD.collect(): pose = str(str(area[0]).replace(\"compl_\", \" \")).strip() normalized_buried_total = \"{:.4f}\".format(area[1])", "from pyspark.sql import SQLContext, Row import ConfigParser as configparser from", "str(config.get('SPARK', 'maxResultSize')) conf = (SparkConf().set(\"spark.driver.maxResultSize\", maxResultSize)) # Create context sc", "= buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect() number_pose_ligand_table =", "complex_name = \"compl_\"+base_name_model+\".pdb\" full_path_for_save_complex = os.path.join(path_pdb_complex,complex_name) list_receptor_model_file = (model_file, full_path_for_save_complex)", "- sasa_complex #Generating result - See column sorting because resultaed", "= -1.0 sasa_rec = -1.0 sasa_lig = -1.0 buried_total =", "buried_area_table = sqlCtx.createDataFrame(buried_areaRDD) buried_area_table.registerTempTable(\"buried_area\") buried_area_sorted_by_buried_total = sqlCtx.sql(\"SELECT * FROM buried_area", "stdout, stderr = process.communicate() # Makes f_temp_sasa_rec file script_make_sasa_rec =", "get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\") f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_complex.xvg\")", "24 ndots = int(sys.argv[2]) #Broadcast path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb) gromacs_path =", "full_area_file = os.path.join(path_analysis,base_file_name_receptor+\".area\") save_receptor_buried_area(full_area_file, all_model_filesRDD) #Loading all area file all_area_file", "Makes the index file with the ligand (chain z) and", "log_file_name) log_file = open(path_file, 'w') diff_time = finish_time - start_time", "f_ndx + \" -surface System \" + \" -output System", "= sc.broadcast(path_analysis_pdb) gromacs_path = sc.broadcast(gromacs_path) pdb_ligand_path = sc.broadcast(pdb_ligand_path) probe =", "get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter import os,", "get_directory_pdb_analysis(path_analysis) # Create SPARK config maxResultSize = str(config.get('SPARK', 'maxResultSize')) conf", "os.remove(f_temp_sasa_rec) if os.path.exists(f_temp_sasa_lig): os.remove(f_temp_sasa_lig) return returned_list # ********** Finish function", "\"{:.4f}\".format(area[1]) #buried_lig_rec_perc = \"{:.4f}\".format(area[4]) #buried_lig_lig_perc = \"{:.4f}\".format(area[5]) #line = receptor+\"\\t\"+ligand+\"\\t\"+str(model)+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\"", "os.remove(f_temp_sasa_lig) return returned_list # ********** Finish 
function ********************************************************** # **********", "for drugdesign project path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign') sc.addPyFile(os.path.join(path_spark_drugdesign,\"vina_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"os_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"gromacs_utils.py\"))", "= str(aux_lig).split(get_separator_filename_mode()) #ligand = splited_aux_lig[0] #model = splited_aux_lig[1] pose =", "was copied by addFile command command = script_make_ndx + \"", "item in model_file: item = replace_chain_atom_line(item,\"d\",\"z\") f_compl.write(item) f_compl.close() # **********", "a JOIN buried_area_total_sort b ON b.ligand = a.ligand ORDER BY", "\" \")).strip() #preparing ligand #splited_aux_lig = str(aux_lig).split(get_separator_filename_mode()) #ligand = splited_aux_lig[0]", "normalized_buried_area DESC \"\"\" #Getting all data full_dataRDD = sqlCtx.sql(sql) #Saving", "buried_areaRDD = sc.parallelize(buried_areaRDD) #buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]),", "#Getting all data full_dataRDD = sqlCtx.sql(sql) #Saving normalized buried area", "********** Finish function ********************************************************** all_model_filesRDD = sc.parallelize(all_model_filesRDD) all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect()", "current_path = os.getcwd() path_file = os.path.join(current_path, log_file_name) log_file = open(path_file,", "+ f_temp_sasa_lig process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr =", "Source file #Path for drugdesign project path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')", "#model = area[2] pose = str(str(area[0]).replace(\"compl_\", \" \")).strip() buried_total =", "= list_receptor_model_file[1] #Open file for writting the complex f_compl =", "= SQLContext(sc) buried_areaRDD = sc.parallelize(buried_areaRDD) #buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]),", "# ********** Starting function ********************************************************** def compute_buried_area(pdb_complex): chZ = \"chZ\"", "get_files_pdb_filter import os, sys from os_utils import preparing_path from gromacs_utils", "os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_rec.xvg\") f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_lig.xvg\") # Makes the index file with", "+ sasa_lig - sasa_complex #Generating result - See column sorting", "#Path where PDB ligand are - They are NOT participated", "= \"{:.4f}\".format(area[1]) line = str(normalized_buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def loading_lines_from_area_files(line): line_splited", "pdb_file_receptor = sc.broadcast(pdb_file_receptor) #Loading PDB model files based on receptor", "resultaed file will be created based on this sorting returned_list", "list_ret = compute_buried_area(full_path_for_save_complex) os.remove(full_path_for_save_complex) return list_ret # ********** Finish function", "Example: 0.14 probe = float(sys.argv[1]) #Indicates ndots. 
Example: 24 ndots", "pyspark.sql import SQLContext, Row import ConfigParser as configparser from subprocess", "save_normalized_buried_area(path_file_buried_area, full_dataRDD) #Removing all area files all_area_files = get_files_area(path_analysis) for", "of models generated by VS path_analysis_pdb = get_directory_pdb_analysis(path_analysis) # Create", "= get_files_pdb(path_receptor_pdb) for receptor in all_receptor_for_complex: list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor)) #Computing Buried areas", "= config.get('DEFAULT', 'pdb_path') #Path for saving pdb files of models", "of model and insert Z chain for item in model_file:", "[] try: base_name = get_name_model_pdb(pdb_complex) ligand_name = get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking =", "buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1]) ) ) buried_area_table =", "-n \" + f_ndx + \" -surface chZ \" +", "script that was copied by addFile command command = script_make_ndx", "all_model_filesRDD) #Loading all area file all_area_file = os.path.join(path_analysis,\"*.area\") buried_areaRDD =", "#Saving buried area of receptor full_area_file = os.path.join(path_analysis,base_file_name_receptor+\".area\") save_receptor_buried_area(full_area_file, all_model_filesRDD)", "and the rest (non chain z) script_make_ndx = SparkFiles.get(\"make_ndx_buried_area_total.sh\") #Getting", ") buried_area_table = sqlCtx.createDataFrame(buried_areaRDD) buried_area_table.registerTempTable(\"buried_area\") buried_area_sorted_by_buried_total = sqlCtx.sql(\"SELECT * FROM", "ConfigParser as configparser from subprocess import Popen, PIPE from datetime", "open(path_file_buried_area,\"w\") line = \"# buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in buried_area_sorted_by_lig_rec_perc:", "buried_area ORDER BY buried_total DESC\") #buried_lig_lig_perc return buried_area_sorted_by_buried_total def save_receptor_buried_area(path_file_buried_area,", "with the ligand (chain z) and the rest (non chain", "column sorting because resultaed file will be created based on", "= model[0] model_file = model[1] path_pdb_complex = path_analysis_pdb_complex_b.value #Obtained from", "file all_area_file = os.path.join(path_analysis,\"*.area\") buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect() #Sorting by buried_total", "#line_ret = ( str(line_splited[0]), str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) )", "sc.addPyFile(os.path.join(path_spark_drugdesign,\"gromacs_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"pdb_io.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"database_io.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"json_utils.py\")) #Adding bash scripts sc.addFile(os.path.join(path_spark_drugdesign,\"make_ndx_buried_area_total.sh\")) sc.addFile(os.path.join(path_spark_drugdesign,\"make_sasa_rec_buried_area_total.sh\")) #Parameters", "the ligand (chain z) and the rest (non chain z)", "float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) ) line_ret = ( str(line_splited[0]), float(line_splited[1]) )", "-s \" + pdb_complex + \" -nopbc \" + \"", "datetime from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis,", "on model file name base_name_model = 
get_name_model_pdb(full_path_model) complex_name = \"compl_\"+base_name_model+\".pdb\"", "= get_name_receptor_pdb(str(pdb_receptor_files[0])) #PDB file loaded into memory is sent by", "= get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\") f_temp_sasa_complex =", "for writting the complex f_compl = open(full_path_for_save_complex, \"w\") #Insert lines", "\"+ pdb_complex + \" \"+ f_ndx process = Popen(command,shell=True, stdout=PIPE,", "os.path.join(root,file) only_mol2_file.append(f_path) return only_mol2_file def save_log(finish_time, start_time): log_file_name = 'vs_buried_areas.log'", "command = gromacs_path.value +\"gmx sasa -f \" + pdb_complex +", "\" -output System \"+ \" -xvg none \" + \"", "log_file = open(path_file, 'w') diff_time = finish_time - start_time msg", "participated in docking pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path') #Path that contains", "path_file_buried_area = os.path.join(path_analysis, \"summary_normalized_buried_areas.dat\") save_normalized_buried_area(path_file_buried_area, full_dataRDD) #Removing all area files", "= base_file_name_receptor+\"_-_\" all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter) all_model_for_complexRDD = sc.parallelize(all_model_for_complex) all_model_filesRDD =", "bash script that was copied by addFile command command =", "column buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD) buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p:", "pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path') #Path that contains all files for", "= str(config.get('SPARK', 'maxResultSize')) conf = (SparkConf().set(\"spark.driver.maxResultSize\", maxResultSize)) # Create context", "log_file_name = 'vs_buried_areas.log' current_path = os.getcwd() path_file = os.path.join(current_path, log_file_name)", "= 'Starting ' + str(start_time) +'\\n' log_file.write(msg) msg = 'Finishing", "pdb_file_receptor = pdb_receptor_files[1] pdb_file_receptor = sc.broadcast(pdb_file_receptor) #Loading PDB model files", "chain z) script_make_ndx = SparkFiles.get(\"make_ndx_buried_area_total.sh\") #Getting bash script that was", "#ligand = splited_aux_lig[0] #model = splited_aux_lig[1] pose = area[0] buried_total", "float(line_splited[4]), float(line_splited[5]) ) line_ret = ( str(line_splited[0]), float(line_splited[1]) ) return", "and insert Z chain for item in model_file: item =", "= \"# buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in buried_area_sorted_by_lig_rec_perc: #receptor =", "********** Finish function ********************************************************** # ********** Starting function ********************************************************** def", "-1.0 returned_list = [] try: base_name = get_name_model_pdb(pdb_complex) ligand_name =", "= os.path.join(path_analysis,\"*.area\") buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect() #Sorting by buried_total column buried_area_sorted_by_buried_total", "= buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5]) )", "list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor)) #Computing Buried areas for 
pdb_receptor_files in list_all_pdb_receptor_files_path: #Getting receptor", "0.14 probe = float(sys.argv[1]) #Indicates ndots. Example: 24 ndots =", "f_ndx process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate()", "#Sorting by buried_total column buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD) buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST", "sent by broadcast pdb_file_receptor = pdb_receptor_files[1] pdb_file_receptor = sc.broadcast(pdb_file_receptor) #Loading", "\")).strip() normalized_buried_total = \"{:.4f}\".format(area[1]) line = str(normalized_buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def", "+ \" \"+ pdb_complex + \" \"+ f_ndx + \"", "vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb,", "ON b.ligand = a.ligand ORDER BY normalized_buried_area DESC \"\"\" #Getting", "os.getcwd() path_file = os.path.join(current_path, log_file_name) log_file = open(path_file, 'w') diff_time", "in docking pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path') #Path that contains all", "copied by addFile command command = script_make_ndx + \" \"", "base_name = get_name_model_pdb(pdb_complex) ligand_name = get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx", "\" -surface chZ \" + \" -output chZ \"+ \"", "all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter) all_model_for_complexRDD = sc.parallelize(all_model_for_complex) all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect() #", "sqlCtx.createDataFrame(number_pose_ligandRDD) number_pose_ligand_table.registerTempTable(\"buried_area_total_sort\") sql = \"\"\" SELECT pose, (b.buried_total / a.heavyAtom)", "save_receptor_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area = open(path_file_buried_area,\"w\") for area in buried_area_sorted_by_lig_rec_perc: #splited_line", "in os.walk(mypath): for file in files: if file.endswith(\".area\"): f_path =", "log_file.write(msg) msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) +'\\n'", "subprocess import Popen, PIPE from datetime import datetime from vina_utils", "[] all_receptor_for_complex = get_files_pdb(path_receptor_pdb) for receptor in all_receptor_for_complex: list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor)) #Computing", "pdb_complex + \" \"+ f_ndx + \" \" + f_temp_sasa_rec", "buried_area_total_sort b ON b.ligand = a.ligand ORDER BY normalized_buried_area DESC", "buried_areaRDD): sqlCtx = SQLContext(sc) buried_areaRDD = sc.parallelize(buried_areaRDD) #buried_areaRDD = buried_areaRDD.map(lambda", "Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() command = gromacs_path.value", "project path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign') sc.addPyFile(os.path.join(path_spark_drugdesign,\"vina_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"os_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"gromacs_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"pdb_io.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"database_io.py\"))", "( str(line_splited[0]), 
str(line_splited[1]), int(line_splited[2]), float(line_splited[3]), float(line_splited[4]), float(line_splited[5]) ) line_ret =", "Gromacs project gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path')) #Path where PDB ligand", "= get_value_from_xvg_sasa(f_temp_sasa_rec) sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig) buried_total = sasa_rec + sasa_lig", "ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5]) ) ) buried_areaRDD = buried_areaRDD.map(lambda", "= area[0].split(\"_-_\") #aux_recep = splited_line[0] #aux_lig = str(splited_line[1]) #preparing receptor", "os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_complex.xvg\") f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_rec.xvg\") f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_lig.xvg\") # Makes the", "= process.communicate() command = gromacs_path.value +\"gmx sasa -f \" +", "buried_total=float(p[1]) ) ) buried_area_table = sqlCtx.createDataFrame(buried_areaRDD) buried_area_table.registerTempTable(\"buried_area\") buried_area_sorted_by_buried_total = sqlCtx.sql(\"SELECT", "pose = str(str(area[0]).replace(\"compl_\", \" \")).strip() buried_total = \"{:.4f}\".format(area[1]) #buried_lig_rec_perc =", "+ f_ndx + \" -surface chZ \" + \" -output", "buried_area_sorted_by_buried_total = sqlCtx.sql(\"SELECT * FROM buried_area ORDER BY buried_total DESC\")", "#splited_line = area[0].split(\"_-_\") #aux_recep = splited_line[0] #aux_lig = str(splited_line[1]) #preparing", "build_list_model_for_complex(model): full_path_model = model[0] model_file = model[1] path_pdb_complex = path_analysis_pdb_complex_b.value", "def save_normalized_buried_area(path_file_buried_area, full_dataRDD): f_buried_area = open(path_file_buried_area,\"w\") line = \"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\"", "Execution (seconds): ' + str(diff_time.total_seconds()) +'\\n' log_file.write(msg) def main(): config", "chZ = \"chZ\" sasa_complex = -1.0 sasa_rec = -1.0 sasa_lig", "#aux_recep = splited_line[0] #aux_lig = str(splited_line[1]) #preparing receptor #receptor =", "path_analysis = config.get('DEFAULT', 'path_analysis') #Ligand Database file ligand_database = config.get('DEFAULT',", "base_file_name_receptor_for_filter = base_file_name_receptor+\"_-_\" all_model_for_complex = get_files_pdb_filter(path_analysis_pdb,base_file_name_receptor_for_filter) all_model_for_complexRDD = sc.parallelize(all_model_for_complex) all_model_filesRDD", "are - They are NOT participated in docking pdb_ligand_path =", "\" + f_temp_sasa_complex process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr", "Row(pose=str(p[0]), buried_total=float(p[1]) ) ) buried_area_table = sqlCtx.createDataFrame(buried_areaRDD) buried_area_table.registerTempTable(\"buried_area\") buried_area_sorted_by_buried_total =", "sorting because resultaed file will be created based on this", "replace_chain_atom_line from database_io import load_database def sorting_buried_area(sc, buried_areaRDD): sqlCtx =", "f_temp_sasa_lig process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate()", "function ********************************************************** def build_list_model_for_complex(model): full_path_model = model[0] model_file = model[1]", "this sorting returned_list = (base_name, buried_total) except: returned_list = (base_name,", "= (model_file, 
full_path_for_save_complex) save_model_receptor(list_receptor_model_file) list_ret = compute_buried_area(full_path_for_save_complex) os.remove(full_path_for_save_complex) return list_ret", "= area[1] #model = area[2] pose = str(str(area[0]).replace(\"compl_\", \" \")).strip()", "#Adding Python Source file #Path for drugdesign project path_spark_drugdesign =", "# ********** Starting function ********************************************************** def build_list_model_for_complex(model): full_path_model = model[0]", "buried_area_table.registerTempTable(\"buried_area\") buried_area_sorted_by_buried_total = sqlCtx.sql(\"SELECT * FROM buried_area ORDER BY buried_total", "= sorting_buried_area(sc, buried_areaRDD) buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total)", "f_ndx = os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\") f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_complex.xvg\") f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_rec.xvg\") f_temp_sasa_lig", "import Popen, PIPE from datetime import datetime from vina_utils import", "pdb_io import replace_chain_atom_line from database_io import load_database def sorting_buried_area(sc, buried_areaRDD):", "= ( str(line_splited[0]), float(line_splited[1]) ) return line_ret def get_files_area(mypath): only_mol2_file", "files in os.walk(mypath): for file in files: if file.endswith(\".area\"): f_path", "get_name_receptor_pdb, get_files_pdb_filter import os, sys from os_utils import preparing_path from", "= sc.parallelize(all_model_for_complex) all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect() # ********** Starting function **********************************************************", "\"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in full_dataRDD.collect(): pose = str(str(area[0]).replace(\"compl_\",", "sc.parallelize(all_model_for_complex) all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect() # ********** Starting function ********************************************************** def", "********************************************************** def compute_buried_area(pdb_complex): chZ = \"chZ\" sasa_complex = -1.0 sasa_rec", "sasa -f \" + pdb_complex + \" -s \" +", "function ********************************************************** # ********** Starting function ********************************************************** def save_model_receptor(list_receptor_model_file): receptor_file", "= open(path_file_buried_area,\"w\") line = \"# normalized_buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line) for area in", "receptor full_area_file = os.path.join(path_analysis,base_file_name_receptor+\".area\") save_receptor_buried_area(full_area_file, all_model_filesRDD) #Loading all area file", "list_receptor_model_file[0] full_path_for_save_complex = list_receptor_model_file[1] #Open file for writting the complex", "stderr=PIPE) stdout, stderr = process.communicate() command = gromacs_path.value +\"gmx sasa", "function ********************************************************** all_model_filesRDD = sc.parallelize(all_model_filesRDD) all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect() #Saving buried", "all_area_file = os.path.join(path_analysis,\"*.area\") buried_areaRDD = 
sc.textFile(all_area_file).map(loading_lines_from_area_files).collect() #Sorting by buried_total column", "= sc.textFile(all_area_file).map(loading_lines_from_area_files).collect() #Sorting by buried_total column buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD)", "buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD) buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose,", "f_buried_area.write(line) for area in buried_area_sorted_by_lig_rec_perc: #receptor = area[0] #ligand =", "= sc.parallelize(buried_areaRDD) #buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]),", "= SparkContext(conf=conf) sqlCtx = SQLContext(sc) #Adding Python Source file #Path", "for pdb_receptor_files in list_all_pdb_receptor_files_path: #Getting receptor name by fully path", "from datetime import datetime from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb,", "for area in buried_area_sorted_by_lig_rec_perc: #receptor = area[0] #ligand = area[1]", "import SQLContext, Row import ConfigParser as configparser from subprocess import", "area[2] pose = str(str(area[0]).replace(\"compl_\", \" \")).strip() buried_total = \"{:.4f}\".format(area[1]) #buried_lig_rec_perc", "f_buried_area.write(line) for area in full_dataRDD.collect(): pose = str(str(area[0]).replace(\"compl_\", \" \")).strip()", "# Create SPARK config maxResultSize = str(config.get('SPARK', 'maxResultSize')) conf =", "-o \" + f_temp_sasa_complex process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout,", "#Generating result - See column sorting because resultaed file will", "#Path for saving pdb files of models generated by VS", "probe = float(sys.argv[1]) #Indicates ndots. Example: 24 ndots = int(sys.argv[2])", "= SparkFiles.get(\"make_sasa_rec_buried_area_total.sh\") #Getting bash script that was copied by addFile", "created based on this sorting returned_list = (base_name, buried_total) except:", "os.remove(f_ndx) if os.path.exists(f_temp_sasa_complex): os.remove(f_temp_sasa_complex) if os.path.exists(f_temp_sasa_rec): os.remove(f_temp_sasa_rec) if os.path.exists(f_temp_sasa_lig): os.remove(f_temp_sasa_lig)", "splited_line[0] #aux_lig = str(splited_line[1]) #preparing receptor #receptor = str(str(aux_recep).replace(\"compl_\", \"", "def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area = open(path_file_buried_area,\"w\") line = \"# buried_area_total[nm2]\\tpose\"+\"\\n\"", "= float(sys.argv[1]) #Indicates ndots. 
# virtualscreening/vina/spark/buried_areas.py
from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
import ConfigParser as configparser
from subprocess import Popen, PIPE
from datetime import datetime
from vina_utils import get_directory_complex_pdb_analysis, get_files_pdb, get_name_model_pdb, get_ligand_from_receptor_ligand_model, get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter
import os, sys
from os_utils import preparing_path
from gromacs_utils import get_value_from_xvg_sasa
from pdb_io import replace_chain_atom_line
from database_io import load_database


def sorting_buried_area(sc, buried_areaRDD):
    sqlCtx = SQLContext(sc)
    buried_areaRDD = sc.parallelize(buried_areaRDD)
    # Older 6-column format, kept for reference:
    #buried_areaRDD = buried_areaRDD.map(lambda p: Row(receptor=str(p[0]), ligand=str(p[1]), model=int(p[2]), buried_lig_rec=float(p[3]), buried_lig_rec_perc=float(p[4]), buried_lig_lig_perc=float(p[5])))
    buried_areaRDD = buried_areaRDD.map(lambda p: Row(pose=str(p[0]), buried_total=float(p[1])))
    buried_area_table = sqlCtx.createDataFrame(buried_areaRDD)
    buried_area_table.registerTempTable("buried_area")
    # Sort all poses by total buried area, largest first
    buried_area_sorted_by_buried_total = sqlCtx.sql("SELECT * FROM buried_area ORDER BY buried_total DESC")
    return buried_area_sorted_by_buried_total
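
# --- Illustrative example (added for clarity; never called by the pipeline) --
# A minimal sketch of how sorting_buried_area is meant to be used, assuming an
# already-created SparkContext. The (pose, buried_total) tuples are made up.
def _example_sorting_buried_area(sc):
    toy_areas = [("compl_rec_-_lig1_-_1", 12.5), ("compl_rec_-_lig2_-_1", 30.1)]
    sorted_df = sorting_buried_area(sc, toy_areas)
    # The lig2 pose comes first because of the ORDER BY buried_total DESC
    return sorted_df.collect()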

def save_receptor_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
    f_buried_area = open(path_file_buried_area, "w")
    for area in buried_area_sorted_by_lig_rec_perc:
        # Older versions parsed receptor, ligand and model out of the pose
        # name here; now the pose string is written as-is.
        pose = area[0]
        buried_total = "{:.4f}".format(area[1])
        line = pose + "\t" + str(buried_total) + "\n"
        f_buried_area.write(line)
    f_buried_area.close()


def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):
    f_buried_area = open(path_file_buried_area, "w")
    line = "# buried_area_total[nm2]\tpose" + "\n"
    f_buried_area.write(line)
    for area in buried_area_sorted_by_lig_rec_perc:
        pose = str(str(area[0]).replace("compl_", " ")).strip()
        buried_total = "{:.4f}".format(area[1])
        line = str(buried_total) + "\t" + str(pose) + "\n"
        f_buried_area.write(line)
    f_buried_area.close()


def save_normalized_buried_area(path_file_buried_area, full_dataRDD):
    f_buried_area = open(path_file_buried_area, "w")
    line = "# normalized_buried_area_total[nm2]\tpose" + "\n"
    f_buried_area.write(line)
    for area in full_dataRDD.collect():
        pose = str(str(area[0]).replace("compl_", " ")).strip()
        normalized_buried_total = "{:.4f}".format(area[1])
        line = str(normalized_buried_total) + "\t" + str(pose) + "\n"
        f_buried_area.write(line)
    f_buried_area.close()


def loading_lines_from_area_files(line):
    # Each .area line is "<pose>\t<buried_total>"
    line_splited = str(line).split()
    line_ret = (str(line_splited[0]), float(line_splited[1]))
    return line_ret


def get_files_area(mypath):
    # Collects all .area files below mypath (the original variable was
    # misleadingly named only_mol2_file)
    only_area_files = []
    for root, dirs, files in os.walk(mypath):
        for file in files:
            if file.endswith(".area"):
                f_path = os.path.join(root, file)
                only_area_files.append(f_path)
    return only_area_files


def save_log(finish_time, start_time):
    log_file_name = 'vs_buried_areas.log'
    current_path = os.getcwd()
    path_file = os.path.join(current_path, log_file_name)
    log_file = open(path_file, 'w')
    diff_time = finish_time - start_time
    msg = 'Starting ' + str(start_time) + '\n'
    log_file.write(msg)
    msg = 'Finishing ' + str(finish_time) + '\n'
    log_file.write(msg)
    msg = 'Time Execution (seconds): ' + str(diff_time.total_seconds()) + '\n'
    log_file.write(msg)
    log_file.close()  # close added; the original left the handle open

def main():
    config = configparser.ConfigParser()
    config.read('config.ini')

    # Path of the Gromacs installation
    gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
    # Path where the original ligand PDB files are - they did NOT take part in docking
    pdb_ligand_path = config.get('DEFAULT', 'pdb_ligand_path')
    # Path that contains all files for analysis
    path_analysis = config.get('DEFAULT', 'path_analysis')
    # Ligand database file
    ligand_database = config.get('DEFAULT', 'ligand_database_path_file')
    # Path where all receptor PDB files are
    path_receptor_pdb = config.get('DEFAULT', 'pdb_path')
    # Path for saving the PDB files of the models generated by virtual screening
    path_analysis_pdb = get_directory_pdb_analysis(path_analysis)

    # Create Spark config
    maxResultSize = str(config.get('SPARK', 'maxResultSize'))
    conf = (SparkConf().set("spark.driver.maxResultSize", maxResultSize))

    # Create context
    sc = SparkContext(conf=conf)
    sqlCtx = SQLContext(sc)

    # Adding Python source files
    # Path of the drugdesign project
    path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
    sc.addPyFile(os.path.join(path_spark_drugdesign, "vina_utils.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "os_utils.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "gromacs_utils.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "pdb_io.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "database_io.py"))
    sc.addPyFile(os.path.join(path_spark_drugdesign, "json_utils.py"))

    # Adding bash scripts
    sc.addFile(os.path.join(path_spark_drugdesign, "make_ndx_buried_area_total.sh"))
    sc.addFile(os.path.join(path_spark_drugdesign, "make_sasa_rec_buried_area_total.sh"))

    # Parameters from the command line
    # Probe radius. Example: 0.14
    probe = float(sys.argv[1])
    # Number of dots per sphere. Example: 24
    ndots = int(sys.argv[2])

    # Broadcast
    path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb)
    gromacs_path = sc.broadcast(gromacs_path)
    pdb_ligand_path = sc.broadcast(pdb_ligand_path)
    probe = sc.broadcast(probe)
    ndots = sc.broadcast(ndots)

    start_time = datetime.now()

    os.environ["GMX_MAXBACKUP"] = "-1"

    # Loading all PDB receptor files into memory
    list_all_pdb_receptor_files_path = []
    all_receptor_for_complex = get_files_pdb(path_receptor_pdb)
    for receptor in all_receptor_for_complex:
        list_all_pdb_receptor_files_path.append(loading_pdb_2_list(receptor))

    # Computing buried areas
    for pdb_receptor_files in list_all_pdb_receptor_files_path:
        # Getting the receptor name from its full path
        base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0]))
        # The PDB file loaded into memory is sent by broadcast
        pdb_file_receptor = pdb_receptor_files[1]
        pdb_file_receptor = sc.broadcast(pdb_file_receptor)
        # Loading the PDB model files that belong to this receptor into memory
        base_file_name_receptor_for_filter = base_file_name_receptor + "_-_"
        all_model_for_complex = get_files_pdb_filter(path_analysis_pdb, base_file_name_receptor_for_filter)
        all_model_for_complexRDD = sc.parallelize(all_model_for_complex)
        all_model_filesRDD = all_model_for_complexRDD.map(loading_pdb_2_list).collect()

        # ********** Starting function **********************************************************
        def compute_buried_area(pdb_complex):
            chZ = "chZ"
            sasa_complex = -1.0
            sasa_rec = -1.0
            sasa_lig = -1.0
            buried_total = -1.0
            returned_list = []
            try:
                base_name = get_name_model_pdb(pdb_complex)
                ligand_name = get_ligand_from_receptor_ligand_model(base_name)
                f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value, ligand_name + ".pdb")
                f_ndx = os.path.join(path_analysis_pdb_complex_b.value, base_name + ".ndx")
                f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value, base_name + "_sasa_complex.xvg")
                f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value, base_name + "_sasa_rec.xvg")
                f_temp_sasa_lig = os.path.join(path_analysis_pdb_complex_b.value, base_name + "_sasa_lig.xvg")

                # Makes the index file with the ligand (chain z) and the rest (non chain z)
                script_make_ndx = SparkFiles.get("make_ndx_buried_area_total.sh")  # bash script shipped by the addFile command
                command = script_make_ndx + " " + gromacs_path.value + " " + pdb_complex + " " + f_ndx
                process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
                stdout, stderr = process.communicate()

                # SASA of the whole complex
                command = gromacs_path.value + "gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface System " + " -output System " + " -xvg none " + " -o " + f_temp_sasa_complex
                process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
                stdout, stderr = process.communicate()

                # Makes the f_temp_sasa_rec file
                script_make_sasa_rec = SparkFiles.get("make_sasa_rec_buried_area_total.sh")  # bash script shipped by the addFile command
                command = script_make_sasa_rec + " " + gromacs_path.value + " " + pdb_complex + " " + f_ndx + " " + f_temp_sasa_rec
                process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
                stdout, stderr = process.communicate()

                # SASA of the ligand alone (chain z)
                command = gromacs_path.value + "gmx sasa -f " + pdb_complex + " -s " + pdb_complex + " -nopbc " + " -n " + f_ndx + " -surface chZ " + " -output chZ " + " -xvg none " + " -o " + f_temp_sasa_lig
                process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
                stdout, stderr = process.communicate()

                sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex)
                sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec)
                sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig)

                buried_total = sasa_rec + sasa_lig - sasa_complex

                # Generating the result - see the column sorting, because the resulting file is created based on it
                returned_list = (base_name, buried_total)
            except Exception:
                returned_list = (base_name, float(0))

            # Deleting temporary files
            if os.path.exists(f_ndx):
                os.remove(f_ndx)
            if os.path.exists(f_temp_sasa_complex):
                os.remove(f_temp_sasa_complex)
            if os.path.exists(f_temp_sasa_rec):
                os.remove(f_temp_sasa_rec)
            if os.path.exists(f_temp_sasa_lig):
                os.remove(f_temp_sasa_lig)

            return returned_list
        # ********** Finish function ***********************************************************
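
        # Note (added for clarity): the buried surface area above follows from
        # simple set arithmetic on the solvent-accessible surface areas (SASA):
        #
        #     buried_total = SASA(receptor) + SASA(ligand) - SASA(complex)
        #
        # e.g. with SASA(rec) = 150.0 nm^2, SASA(lig) = 6.0 nm^2 and
        # SASA(complex) = 148.5 nm^2 (made-up numbers), the interface buries
        # 150.0 + 6.0 - 148.5 = 7.5 nm^2 between receptor and ligand.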

        # ********** Starting function **********************************************************
        def save_model_receptor(list_receptor_model_file):
            receptor_file = pdb_file_receptor.value  # obtained from broadcast
            model_file = list_receptor_model_file[0]
            full_path_for_save_complex = list_receptor_model_file[1]
            # Open the file for writing the complex
            f_compl = open(full_path_for_save_complex, "w")
            # Insert lines of the receptor
            for item in receptor_file:
                f_compl.write(item)
            # Insert lines of the model, rewriting chain d as chain z
            for item in model_file:
                item = replace_chain_atom_line(item, "d", "z")
                f_compl.write(item)
            f_compl.close()
        # ********** Finish function ***********************************************************

        # ********** Starting function **********************************************************
        def build_list_model_for_complex(model):
            full_path_model = model[0]
            model_file = model[1]
            path_pdb_complex = path_analysis_pdb_complex_b.value  # obtained from broadcast
            # Building the complex file based on the model file name
            base_name_model = get_name_model_pdb(full_path_model)
            complex_name = "compl_" + base_name_model + ".pdb"
            full_path_for_save_complex = os.path.join(path_pdb_complex, complex_name)
            list_receptor_model_file = (model_file, full_path_for_save_complex)
            save_model_receptor(list_receptor_model_file)
            list_ret = compute_buried_area(full_path_for_save_complex)
            os.remove(full_path_for_save_complex)
            return list_ret
        # ********** Finish function ***********************************************************

        all_model_filesRDD = sc.parallelize(all_model_filesRDD)
        all_model_filesRDD = all_model_filesRDD.map(build_list_model_for_complex).collect()

        # Saving the buried areas of this receptor
        full_area_file = os.path.join(path_analysis, base_file_name_receptor + ".area")
        save_receptor_buried_area(full_area_file, all_model_filesRDD)

    # Loading all area files
    all_area_file = os.path.join(path_analysis, "*.area")
    buried_areaRDD = sc.textFile(all_area_file).map(loading_lines_from_area_files).collect()

    # Sorting by the buried_total column
    buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD)
    buried_area_sorted_by_buried_total.cache()

    buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total)).collect()

    # Saving the buried area summary file
    path_file_buried_area = os.path.join(path_analysis, "summary_buried_areas_total.dat")
    save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST)
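
    # Note (added for clarity): the normalization below divides each pose's
    # total buried area by the ligand's heavy-atom count from the ligand
    # database, so large ligands do not dominate the ranking simply by having
    # more surface. Sketch of the join with made-up numbers: a database row
    # (ligand='lig1', heavyAtom=20) joined to a pose row (ligand='lig1',
    # buried_total=7.5) yields normalized_buried_area = 7.5 / 20 = 0.375.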

    # Calculating the normalized buried area
    # Loading the ligand database
    rdd_database = load_database(sc, ligand_database)
    # Creating the DataFrame
    database_table = sqlCtx.createDataFrame(rdd_database)
    database_table.registerTempTable("database")

    number_pose_ligandRDD = buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose))).collect()
    number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD)
    number_pose_ligand_table.registerTempTable("buried_area_total_sort")

    sql = """
        SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area
        FROM database a
        JOIN buried_area_total_sort b ON b.ligand = a.ligand
        ORDER BY normalized_buried_area DESC
    """
    # Getting all data
    full_dataRDD = sqlCtx.sql(sql)

    # Saving the normalized buried area file
    path_file_buried_area = os.path.join(path_analysis, "summary_normalized_buried_areas.dat")
    save_normalized_buried_area(path_file_buried_area, full_dataRDD)

    # Removing all intermediate .area files
    all_area_files = get_files_area(path_analysis)
    for area_file in all_area_files:
        os.remove(area_file)

    finish_time = datetime.now()

    save_log(finish_time, start_time)


main()
config.read('config.ini')", "full_path_for_save_complex = list_receptor_model_file[1] #Open file for writting the complex f_compl", "save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area = open(path_file_buried_area,\"w\") line = \"# buried_area_total[nm2]\\tpose\"+\"\\n\" f_buried_area.write(line)", "buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total) ).collect() #Saving buried", "scripts sc.addFile(os.path.join(path_spark_drugdesign,\"make_ndx_buried_area_total.sh\")) sc.addFile(os.path.join(path_spark_drugdesign,\"make_sasa_rec_buried_area_total.sh\")) #Parameters form command line #Indicates probe. Example:", "# Create context sc = SparkContext(conf=conf) sqlCtx = SQLContext(sc) #Adding", "buried_area_sorted_by_buried_total_LIST = buried_area_sorted_by_buried_total.map(lambda p: (p.pose, p.buried_total) ).collect() #Saving buried area", "from pyspark import SparkContext, SparkConf, SparkFiles from pyspark.sql import SQLContext,", "= config.get('DEFAULT', 'pdb_ligand_path') #Path that contains all files for analysis", "os.remove(full_path_for_save_complex) return list_ret # ********** Finish function ********************************************************** all_model_filesRDD =", "pdb receptor are path_receptor_pdb = config.get('DEFAULT', 'pdb_path') #Path for saving", "= str(str(area[0]).replace(\"compl_\", \" \")).strip() buried_total = \"{:.4f}\".format(area[1]) #buried_lig_rec_perc = \"{:.4f}\".format(area[4])", "#PDB file loaded into memory is sent by broadcast pdb_file_receptor", "= Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() command =", "none \" + \" -o \" + f_temp_sasa_complex process =", "str(splited_line[1]) #preparing receptor #receptor = str(str(aux_recep).replace(\"compl_\", \" \")).strip() #preparing ligand", "return list_ret # ********** Finish function ********************************************************** all_model_filesRDD = sc.parallelize(all_model_filesRDD)", "stderr = process.communicate() sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex) sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec) sasa_lig", "Dataframe database_table = sqlCtx.createDataFrame(rdd_database) database_table.registerTempTable(\"database\") number_pose_ligandRDD = buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total),", "return only_mol2_file def save_log(finish_time, start_time): log_file_name = 'vs_buried_areas.log' current_path =", "item in receptor_file: f_compl.write(item) #Insert lines of model and insert", "p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect() number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD) number_pose_ligand_table.registerTempTable(\"buried_area_total_sort\")", "def loading_lines_from_area_files(line): line_splited = str(line).split() #line_ret = ( str(line_splited[0]), str(line_splited[1]),", "= (base_name, buried_total) except: returned_list = (base_name, float(0)) #Deleting files", "area #Loading database rdd_database = load_database(sc, ligand_database) #Creating Dataframe database_table", "+ \" -n \" + f_ndx + \" -surface System", "all_area_files = get_files_area(path_analysis) for area_file in all_area_files: os.remove(area_file) finish_time =", "sc.parallelize(all_model_filesRDD) all_model_filesRDD = 
all_model_filesRDD.map(build_list_model_for_complex).collect() #Saving buried area of receptor full_area_file", "sc = SparkContext(conf=conf) sqlCtx = SQLContext(sc) #Adding Python Source file", "was copied by addFile command command = script_make_sasa_rec + \"", "sqlCtx = SQLContext(sc) #Adding Python Source file #Path for drugdesign", "PDB ligand are - They are NOT participated in docking", "number_pose_ligand_table = sqlCtx.createDataFrame(number_pose_ligandRDD) number_pose_ligand_table.registerTempTable(\"buried_area_total_sort\") sql = \"\"\" SELECT pose, (b.buried_total", "number_pose_ligandRDD = buried_area_sorted_by_buried_total.map(lambda p: Row(buried_total=int(p.buried_total), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose) ) ).collect() number_pose_ligand_table", "sqlCtx.createDataFrame(buried_areaRDD) buried_area_table.registerTempTable(\"buried_area\") buried_area_sorted_by_buried_total = sqlCtx.sql(\"SELECT * FROM buried_area ORDER BY", "save_buried_area(path_file_buried_area, buried_area_sorted_by_buried_total_LIST) #Calculating normalized buried area #Loading database rdd_database =", "get_value_from_xvg_sasa(f_temp_sasa_rec) sasa_lig = get_value_from_xvg_sasa(f_temp_sasa_lig) buried_total = sasa_rec + sasa_lig -", "f_compl.write(item) f_compl.close() # ********** Finish function ********************************************************** # ********** Starting", "Create context sc = SparkContext(conf=conf) sqlCtx = SQLContext(sc) #Adding Python", "Buried areas for pdb_receptor_files in list_all_pdb_receptor_files_path: #Getting receptor name by", "-surface chZ \" + \" -output chZ \"+ \" -xvg", "if os.path.exists(f_temp_sasa_complex): os.remove(f_temp_sasa_complex) if os.path.exists(f_temp_sasa_rec): os.remove(f_temp_sasa_rec) if os.path.exists(f_temp_sasa_lig): os.remove(f_temp_sasa_lig) return", "returned_list = [] try: base_name = get_name_model_pdb(pdb_complex) ligand_name = get_ligand_from_receptor_ligand_model(base_name)", "float(line_splited[5]) ) line_ret = ( str(line_splited[0]), float(line_splited[1]) ) return line_ret", "stderr=PIPE) stdout, stderr = process.communicate() # Makes f_temp_sasa_rec file script_make_sasa_rec", "- start_time msg = 'Starting ' + str(start_time) +'\\n' log_file.write(msg)", "= a.ligand ORDER BY normalized_buried_area DESC \"\"\" #Getting all data", "if os.path.exists(f_temp_sasa_lig): os.remove(f_temp_sasa_lig) return returned_list # ********** Finish function **********************************************************", "#Open file for writting the complex f_compl = open(full_path_for_save_complex, \"w\")", "#Path that contains all files for analysis path_analysis = config.get('DEFAULT',", "loaded into memory is sent by broadcast pdb_file_receptor = pdb_receptor_files[1]", "file script_make_sasa_rec = SparkFiles.get(\"make_sasa_rec_buried_area_total.sh\") #Getting bash script that was copied", "config = configparser.ConfigParser() config.read('config.ini') #Path for Gromacs project gromacs_path =", "= receptor+\"\\t\"+ligand+\"\\t\"+model+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\" line = pose+\"\\t\"+str(buried_total)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc):", "where all pdb receptor are path_receptor_pdb = config.get('DEFAULT', 'pdb_path') #Path", "sc.addPyFile(os.path.join(path_spark_drugdesign,\"database_io.py\")) 
sc.addPyFile(os.path.join(path_spark_drugdesign,\"json_utils.py\")) #Adding bash scripts sc.addFile(os.path.join(path_spark_drugdesign,\"make_ndx_buried_area_total.sh\")) sc.addFile(os.path.join(path_spark_drugdesign,\"make_sasa_rec_buried_area_total.sh\")) #Parameters form command", "= os.path.join(path_analysis_pdb_complex_b.value,base_name+\".ndx\") f_temp_sasa_complex = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_complex.xvg\") f_temp_sasa_rec = os.path.join(path_analysis_pdb_complex_b.value,base_name+\"_sasa_rec.xvg\") f_temp_sasa_lig =", "f_temp_sasa_complex process = Popen(command,shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate()", "+ \" -output System \"+ \" -xvg none \" +", "if os.path.exists(f_ndx): os.remove(f_ndx) if os.path.exists(f_temp_sasa_complex): os.remove(f_temp_sasa_complex) if os.path.exists(f_temp_sasa_rec): os.remove(f_temp_sasa_rec) if", "= path_analysis_pdb_complex_b.value #Obtained from broadcast #Building complex file based on", "ndots = int(sys.argv[2]) #Broadcast path_analysis_pdb_complex_b = sc.broadcast(path_analysis_pdb) gromacs_path = sc.broadcast(gromacs_path)", "by buried_total column buried_area_sorted_by_buried_total = sorting_buried_area(sc, buried_areaRDD) buried_area_sorted_by_buried_total.cache() buried_area_sorted_by_buried_total_LIST =", "in buried_area_sorted_by_lig_rec_perc: #splited_line = area[0].split(\"_-_\") #aux_recep = splited_line[0] #aux_lig =", "-nopbc \" + \" -n \" + f_ndx + \"", "= load_database(sc, ligand_database) #Creating Dataframe database_table = sqlCtx.createDataFrame(rdd_database) database_table.registerTempTable(\"database\") number_pose_ligandRDD", "full_path_for_save_complex) save_model_receptor(list_receptor_model_file) list_ret = compute_buried_area(full_path_for_save_complex) os.remove(full_path_for_save_complex) return list_ret # **********", "= sqlCtx.sql(sql) #Saving normalized buried area file path_file_buried_area = os.path.join(path_analysis,", "= process.communicate() sasa_complex = get_value_from_xvg_sasa(f_temp_sasa_complex) sasa_rec = get_value_from_xvg_sasa(f_temp_sasa_rec) sasa_lig =", "= (base_name, float(0)) #Deleting files if os.path.exists(f_ndx): os.remove(f_ndx) if os.path.exists(f_temp_sasa_complex):", "receptor+\"\\t\"+ligand+\"\\t\"+model+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\" line = pose+\"\\t\"+str(buried_total)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area", "ndots = sc.broadcast(ndots) start_time = datetime.now() os.environ[\"GMX_MAXBACKUP\"]=\"-1\" #Loading all PDB", "sql = \"\"\" SELECT pose, (b.buried_total / a.heavyAtom) as normalized_buried_area", "'ligand_database_path_file') #Path where all pdb receptor are path_receptor_pdb = config.get('DEFAULT',", "= get_name_model_pdb(pdb_complex) ligand_name = get_ligand_from_receptor_ligand_model(base_name) f_pdb_ligand_no_docking = os.path.join(pdb_ligand_path.value,ligand_name+\".pdb\") f_ndx =", "buried area file path_file_buried_area = os.path.join(path_analysis, \"summary_normalized_buried_areas.dat\") save_normalized_buried_area(path_file_buried_area, full_dataRDD) #Removing", "all_model_for_complexRDD.map(loading_pdb_2_list).collect() # ********** Starting function ********************************************************** def compute_buried_area(pdb_complex): chZ =", "save_model_receptor(list_receptor_model_file): receptor_file 
= pdb_file_receptor.value #Obtained from broadcast model_file = list_receptor_model_file[0]", "= compute_buried_area(full_path_for_save_complex) os.remove(full_path_for_save_complex) return list_ret # ********** Finish function **********************************************************", "command line #Indicates probe. Example: 0.14 probe = float(sys.argv[1]) #Indicates", "str(start_time) +'\\n' log_file.write(msg) msg = 'Finishing ' + str(finish_time) +'\\n'", "loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter import os, sys from os_utils import preparing_path", "pose = area[0] buried_total = \"{:.4f}\".format(area[1]) #line = receptor+\"\\t\"+ligand+\"\\t\"+model+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\" line", "line = pose+\"\\t\"+str(buried_total)+\"\\n\" f_buried_area.write(line) f_buried_area.close() def save_buried_area(path_file_buried_area, buried_area_sorted_by_lig_rec_perc): f_buried_area =", "area[0] #ligand = area[1] #model = area[2] pose = str(str(area[0]).replace(\"compl_\",", "#buried_lig_lig_perc = \"{:.4f}\".format(area[5]) #line = receptor+\"\\t\"+ligand+\"\\t\"+str(model)+\"\\t\"+str(buried_lig_rec)+\"\\t\"+str(buried_lig_rec_perc)+\"\\t\"+str(buried_lig_lig_perc)+\"\\n\" line = str(buried_total)+\"\\t\"+str(pose)+\"\\n\" f_buried_area.write(line)", "name by fully path base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0])) #PDB file loaded", "start_time): log_file_name = 'vs_buried_areas.log' current_path = os.getcwd() path_file = os.path.join(current_path,", "#Loading all PDB receptor files into memory list_all_pdb_receptor_files_path = []", "\" -output chZ \"+ \" -xvg none \" + \"", "sc.addPyFile(os.path.join(path_spark_drugdesign,\"vina_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"os_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"gromacs_utils.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"pdb_io.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"database_io.py\")) sc.addPyFile(os.path.join(path_spark_drugdesign,\"json_utils.py\")) #Adding bash scripts sc.addFile(os.path.join(path_spark_drugdesign,\"make_ndx_buried_area_total.sh\"))", "pdb_file_receptor.value #Obtained from broadcast model_file = list_receptor_model_file[0] full_path_for_save_complex = list_receptor_model_file[1]", "list_receptor_model_file[1] #Open file for writting the complex f_compl = open(full_path_for_save_complex,", "********************************************************** # ********** Starting function ********************************************************** def build_list_model_for_complex(model): full_path_model =", "-xvg none \" + \" -o \" + f_temp_sasa_complex process", "Create SPARK config maxResultSize = str(config.get('SPARK', 'maxResultSize')) conf = (SparkConf().set(\"spark.driver.maxResultSize\",", "# Makes f_temp_sasa_rec file script_make_sasa_rec = SparkFiles.get(\"make_sasa_rec_buried_area_total.sh\") #Getting bash script", "addFile command command = script_make_sasa_rec + \" \" + gromacs_path.value", "-output chZ \"+ \" -xvg none \" + \" -o", "on this sorting returned_list = (base_name, buried_total) except: returned_list =", "path base_file_name_receptor = get_name_receptor_pdb(str(pdb_receptor_files[0])) #PDB file loaded into memory is", "get_separator_filename_mode, get_directory_pdb_analysis, loading_pdb_2_list, get_name_receptor_pdb, get_files_pdb_filter import os, sys from os_utils", "= gromacs_path.value +\"gmx sasa -f \" + 
pdb_complex + \"" ]
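# A minimal sketch of the buried-area arithmetic used above, with the Gromacs
# calls and Spark plumbing stripped away. The buried area is the solvent-
# accessible surface area (SASA) lost on binding, and the SQL step then divides
# it by the ligand's heavy-atom count (the heavyAtom column). All numeric
# values below are made-up illustrations, not data from the pipeline.

def buried_area(sasa_rec, sasa_lig, sasa_complex):
    # Surface each partner exposes alone but not in the complex (nm2),
    # i.e. buried_total = sasa_rec + sasa_lig - sasa_complex as computed
    # in compute_buried_area() above.
    return sasa_rec + sasa_lig - sasa_complex

def normalized_buried_area(buried_total, heavy_atoms):
    # Same quantity as the SQL expression b.buried_total / a.heavyAtom.
    return float(buried_total) / heavy_atoms

total = buried_area(sasa_rec=95.2, sasa_lig=6.1, sasa_complex=96.9)
print("{:.4f}".format(total))                              # 4.4000
print("{:.4f}".format(normalized_buried_area(total, 30)))  # 0.1467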
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
[Summary]
Display-support helper class for the MAIL action screens
"""

import pytz
import datetime
import json
import socket
import traceback

from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.db import transaction
from django.conf import settings

from libs.commonlibs import define as defs
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.aes_cipher import AESCipher
from web_app.models.models import ActionType
from web_app.models.mail_models import MailDriver
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck

logger = OaseLogger.get_instance()  # Logger initialization


class mailDriverInfo():

    def __init__(self, drv_id, act_id, name, ver, icon_name):
        self.drv_id = drv_id
        self.act_id = act_id
        self.name = name
        self.ver = ver
        self.icon_name = icon_name

    def __str__(self):
        return '%s(ver%s)' % (self.name, self.ver)

    def get_driver_name(self):
        return '%s Driver ver%s' % (self.name, self.ver)

    def get_driver_id(self):
        return self.drv_id

    def get_icon_name(self):
        return self.icon_name

    @classmethod
    def get_template_file(cls):
        return 'system/mail/action_mail.html'

    @classmethod
    def get_info_list(cls, user_groups):
        try:
            mail_driver_obj_list = MailDriver.objects.all()
        except Exception as e:
            # Exceptions here are caught by the outer caller
            raise

        protocol_dict = cls.get_define()['dict']
        mail_driver_dto_list = []
        cipher = AESCipher(settings.AES_KEY)
        for mail_obj in mail_driver_obj_list:
            mail_info = mail_obj.__dict__
            if mail_obj.password:
                mail_info['password'] = cipher.decrypt(mail_obj.password)
            mail_info['protocol_str'] = protocol_dict[mail_obj.protocol]
            mail_driver_dto_list.append(mail_info)

        return mail_driver_dto_list

    @classmethod
    def get_group_list(cls, user_groups):
        """
        [Summary]
        Get the group list (excluding the system administration group)
        """
        return []

    @classmethod
    def get_define(cls):
        protocol_dict = {key_value['v']: key_value['k'] for key_value in defs.SMTP_PROTOCOL.LIST_ALL}
        defines = {
            'list_all': defs.SMTP_PROTOCOL.LIST_ALL,
            'dict': protocol_dict,
        }
        return defines

    def record_lock(self, json_str, request):
        logger.logic_log('LOSI00001', 'None', request=request)

        driver_id = self.get_driver_id()

        # Lock the record before an update or delete
        if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):
            drvinfo_modify = int(json_str['json_str']['mail_driver_id'])
            MailDriver.objects.select_for_update().filter(pk=drvinfo_modify)
            logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request)

    def modify(self, json_str, request):
        """
        [Method summary]
        DB update processing
        """
        logger.logic_log('LOSI00001', 'None', request=request)

        error_flag = False
        error_msg = {
            'mail_disp_name' : '',
            'protocol' : '',
            'smtp_server' : '',
            'port' : '',
            'user' : '',
            'password' : '',
        }
        now = datetime.datetime.now(pytz.timezone('UTC'))
        emo_chk = UnicodeCheck()
        # Data returned on success
        response = {"status": "success",}

        try:
            rq = json_str['json_str']
            ope = int(rq['ope'])
            # Input validation for every operation except delete
            if ope != defs.DABASE_OPECODE.OPE_DELETE:
                error_flag = self._validate(rq, error_msg, request)
                if error_flag:
                    raise UserWarning('validation error.')

            # Encrypt the password; a blank value is stored as an empty string
            cipher = AESCipher(settings.AES_KEY)

            if ope == defs.DABASE_OPECODE.OPE_UPDATE:
                encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
                driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])
                driver_info_mod.mail_disp_name = rq['mail_disp_name']
                driver_info_mod.protocol = rq['protocol']
                driver_info_mod.smtp_server = rq['smtp_server']
                driver_info_mod.port = rq['port']
                driver_info_mod.user = rq['user']
                driver_info_mod.password = encrypted_password
                driver_info_mod.last_update_user = request.user.user_name
                driver_info_mod.last_update_timestamp = now
                driver_info_mod.save(force_update=True)

            elif ope == defs.DABASE_OPECODE.OPE_DELETE:
                MailDriver.objects.filter(pk=rq['mail_driver_id']).delete()

            elif ope == defs.DABASE_OPECODE.OPE_INSERT:
                encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
                driver_info_reg = MailDriver(
                    mail_disp_name = rq['mail_disp_name'],
                    protocol = rq['protocol'],
                    smtp_server = rq['smtp_server'],
                    port = rq['port'],
                    user = rq['user'],
                    password = encrypted_password,
                    last_update_user = request.user.user_name,
                    last_update_timestamp = now
                ).save(force_insert=True)

        except MailDriver.DoesNotExist:
            logger.logic_log('LOSM07006', "mail_driver_id", rq['mail_driver_id'], request=request)

        except Exception as e:
            logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
            response = {
                'status': 'failure',
                'error_msg': error_msg,  # Error details (shown via the error icon)
            }

        logger.logic_log('LOSI00002', 'response=%s' % response, request=request)

        return response

    def _validate(self, rq, error_msg, request):
        """
        [Summary]
        Input validation
        [Args]
        rq: dict  requested input data
        error_msg: dict
        [Return]
        """
        logger.logic_log('LOSI00001', 'data: %s, error_msg:%s' % (rq, error_msg))

        error_flag = False
        emo_chk = UnicodeCheck()
        emo_flag = False
        emo_flag_ita_disp_name = False
        emo_flag_hostname = False

        if len(rq['mail_disp_name']) == 0:
            error_flag = True
            error_msg['mail_disp_name'] += get_message('MOSJA27201', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07001', 'mail_disp_name', request=request)

        if len(rq['mail_disp_name']) > 64:
            error_flag = True
            error_msg['mail_disp_name'] += get_message('MOSJA27202', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07002', 'mail_disp_name', 64, rq['mail_disp_name'], request=request)

        # Emoji check
        value_list = emo_chk.is_emotion(rq['mail_disp_name'])
        if len(value_list) > 0:
            error_flag = True
            emo_flag = True
            error_msg['mail_disp_name'] += get_message('MOSJA27216', request.user.get_lang_mode(), showMsgId=False) + '\n'

        if len(rq['protocol']) == 0:
            error_flag = True
            error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07001', 'protocol', request=request)

        if len(rq['protocol']) > 64:
            error_flag = True
            error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request)

        if len(rq['smtp_server']) == 0:
            error_flag = True
            error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07001', 'smtp_server', request=request)

        if len(rq['smtp_server']) > 128:
            error_flag = True
            error_msg['smtp_server'] += get_message('MOSJA27204', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07002', 'smtp_server', 64, rq['smtp_server'], request=request)

        # Emoji check
        value_list = emo_chk.is_emotion(rq['smtp_server'])
        if len(value_list) > 0:
            error_flag = True
            error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\n'

        if len(rq['port']) == 0:
            error_flag = True
            error_msg['port'] += get_message('MOSJA27205', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07001', 'port', request=request)

        try:
            tmp_port = int(rq['port'])
            if 0 > tmp_port or tmp_port > 65535:
                error_flag = True
                error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
                logger.user_log('LOSM07003', 'port', rq['port'], request=request)
        except ValueError:
            error_flag = True
            error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07003', 'port', rq['port'], request=request)

        if len(rq['user']) > 64:
            error_flag = True
            error_msg['user'] += get_message('MOSJA27207', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07002', 'user', 64, rq['user'], request=request)

        # Emoji check
        value_list = emo_chk.is_emotion(rq['user'])
        if len(value_list) > 0:
            error_flag = True
            error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\n'

        if len(rq['password']) > 64:
            error_flag = True
            # The message ID is redacted as <PASSWORD> in the source
            error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request)

        # Emoji check
        value_list = emo_chk.is_emotion(rq['password'])
        if len(value_list) > 0:
            error_flag = True
            error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode(), showMsgId=False) + '\n'

        if not emo_flag:
            duplication = MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name'])
            if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id:
                error_flag = True
                error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\n'
                logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request)

        if error_flag == False:
            # Connectivity check
            resp_code = -1
            try:
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                    resp_code = sock.connect_ex((rq['smtp_server'], int(rq['port'])))  # The host name must be resolvable (/etc/hosts etc.)
                    sock.close()
            except Exception as e:
                pass
            if resp_code != 0:
                error_flag = True
                # TODO: provisionally, this error is reported under the name field
                error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\n'
                logger.user_log('LOSM07005', rq['smtp_server'], rq['port'], request=request)

        return error_flag
<PASSWORD>.encrypt(rq['password']) if rq['password'] else '' driver_info_mod =", "(self.name, self.ver) def get_driver_name(self): return '%s Driver ver%s' % (self.name,", "from django.db import transaction from django.conf import settings from libs.commonlibs", "pytz import datetime import json import socket import traceback from", "driver_info_mod.save(force_update=True) elif ope == defs.DABASE_OPECODE.OPE_DELETE: MailDriver.objects.filter(pk=rq['mail_driver_id']).delete() elif ope == defs.DABASE_OPECODE.OPE_INSERT:", "= MailDriver.objects.all() except Exception as e: # ここでの例外は大外で拾う raise protocol_dict", "json_str, request): \"\"\" [メソッド概要] グループのDB更新処理 \"\"\" logger.logic_log('LOSI00001', 'None', request=request) error_flag", "= [] cipher = AESCipher(settings.AES_KEY) for mail_obj in mail_driver_obj_list: mail_info", "json_str, request): logger.logic_log('LOSI00001', 'None', request=request) driver_id = self.get_driver_id() # 更新前にレコードロック", "sock.connect_ex((rq['smtp_server'], int(rq['port']))) # host名名前解決が必要/etc/hostsとか sock.close() except Exception as e: pass", "'\\n' if len(rq['password']) > 64: error_flag = True error_msg['password'] +=", "ValueError: error_flag = True error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\\n'", "error_flag = True error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07004',", "True error_msg['port'] += get_message('MOSJA27205', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'port', request=request)", "len(rq['protocol']) > 64: error_flag = True error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode())", "if resp_code != 0: error_flag = True #todo 仮でこのエラーは名前に入れている error_msg['mail_disp_name']", "mail_obj.password: mail_info['password'] = <PASSWORD>.decrypt(mail_obj.password) mail_info['protocol_str'] = protocol_dict[mail_obj.protocol] mail_driver_dto_list.append(mail_info) return mail_driver_dto_list", "logger.user_log('LOSM07001', 'protocol', request=request) if len(rq['protocol']) > 64: error_flag = True", "may obtain a copy of the License at # #", "get_message from web_app.serializers.unicode_check import UnicodeCheck logger = OaseLogger.get_instance() # ロガー初期化", "error_flag = True error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001',", "if len(rq['smtp_server']) > 128: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27204',", "== 0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) +", "'port', rq['port'], request=request) if len(rq['user']) > 64: error_flag = True", "emo_chk.is_emotion(rq['password']) if len(value_list) > 0: error_flag = True error_msg['password'] +=", "request=request) error_flag = False error_msg = { 'mail_disp_name' : '',", "driver_info_mod.password = <PASSWORD> driver_info_mod.last_update_user = request.user.user_name driver_info_mod.last_update_timestamp = now driver_info_mod.save(force_update=True)", "+ '\\n' logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request) # 絵文字チェック value_list", "+ '\\n' logger.user_log('LOSM07002', 'smtp_server', 64, rq['smtp_server'], request=request) # 絵文字チェック value_list", "'', 'port' : '', 'user' : '', 'password' : '',", "False: # 疎通確認 resp_code = -1 try: with socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "error_msg = { 'mail_disp_name' : '', 
'protocol' : '', 'smtp_server'", "duplication[0].mail_driver_id: error_flag = True error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\\n'", "from web_app.models.mail_models import MailDriver from web_app.templatetags.common import get_message from web_app.serializers.unicode_check", "drv_id, act_id, name, ver, icon_name): self.drv_id = drv_id self.act_id =", "= emo_chk.is_emotion(rq['user']) if len(value_list) > 0: error_flag = True error_msg['user']", "= emo_chk.is_emotion(rq['password']) if len(value_list) > 0: error_flag = True error_msg['password']", "import get_message from web_app.serializers.unicode_check import UnicodeCheck logger = OaseLogger.get_instance() #", "+= get_message('MOSJA27206', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07003', 'port', rq['port'], request=request) except", "may not use this file except in compliance with the", "= rq['smtp_server'] driver_info_mod.port = rq['port'] driver_info_mod.user = rq['user'] driver_info_mod.password =", "rq['port'], user = rq['user'], password = <PASSWORD>, last_update_user = request.user.user_name,", "True emo_flag = True error_msg['mail_disp_name'] += get_message('MOSJA27216', request.user.get_lang_mode(), showMsgId=False) +", "error_msg:%s'%(rq, error_msg)) error_flag = False emo_chk = UnicodeCheck() emo_flag =", "defs from libs.commonlibs.oase_logger import OaseLogger from libs.commonlibs.aes_cipher import AESCipher from", "HttpResponseServerError from django.db import transaction from django.conf import settings from", "<PASSWORD> driver_info_mod.last_update_user = request.user.user_name driver_info_mod.last_update_timestamp = now driver_info_mod.save(force_update=True) elif ope", "request=request) if len(rq['protocol']) > 64: error_flag = True error_msg['protocol'] +=", "error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07003', 'port', rq['port'], request=request)", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "mail_driver_dto_list @classmethod def get_group_list(cls, user_groups): \"\"\" [概要] グループ一覧を取得する(システム管理グループを除く) \"\"\" return", "request) if error_flag: raise UserWarning('validation error.') # パスワードを暗号化 空なら空文字 cipher", "this file except in compliance with the License. 
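    # For orientation: assuming defs.SMTP_PROTOCOL.LIST_ALL is a list of
    # {'k': <label>, 'v': <code>} dicts (an assumption inferred from the
    # comprehension above, not a documented contract), get_define() returns
    # something shaped like:
    #   {'list_all': [{'k': 'SMTP', 'v': 1}, ...], 'dict': {1: 'SMTP', ...}}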
    def record_lock(self, json_str, request):

        logger.logic_log('LOSI00001', 'None', request=request)

        driver_id = self.get_driver_id()

        # lock the record before updating
        if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):
            drvinfo_modify = int(json_str['json_str']['mail_driver_id'])
            MailDriver.objects.select_for_update().filter(pk=drvinfo_modify)

        logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request)

    def modify(self, json_str, request):
        """
        [Method overview]
          DB update processing for the group
        """
        logger.logic_log('LOSI00001', 'None', request=request)

        error_flag = False
        error_msg = {
            'mail_disp_name' : '',
            'protocol'       : '',
            'smtp_server'    : '',
            'port'           : '',
            'user'           : '',
            'password'       : '',
        }
        now = datetime.datetime.now(pytz.timezone('UTC'))
        emo_chk = UnicodeCheck()

        # response data on success
        response = {"status": "success",}

        try:
            rq = json_str['json_str']
            ope = int(rq['ope'])

            # input validation for every operation except delete
            if ope != defs.DABASE_OPECODE.OPE_DELETE:
                error_flag = self._validate(rq, error_msg, request)

            if error_flag:
                raise UserWarning('validation error.')

            # encrypt the password; use an empty string if it is blank
            cipher = AESCipher(settings.AES_KEY)

            if ope == defs.DABASE_OPECODE.OPE_UPDATE:
                encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
                driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])
                driver_info_mod.mail_disp_name = rq['mail_disp_name']
                driver_info_mod.protocol = rq['protocol']
                driver_info_mod.smtp_server = rq['smtp_server']
                driver_info_mod.port = rq['port']
                driver_info_mod.user = rq['user']
                driver_info_mod.password = encrypted_password
                driver_info_mod.last_update_user = request.user.user_name
                driver_info_mod.last_update_timestamp = now
                driver_info_mod.save(force_update=True)

            elif ope == defs.DABASE_OPECODE.OPE_DELETE:
                MailDriver.objects.filter(pk=rq['mail_driver_id']).delete()

            elif ope == defs.DABASE_OPECODE.OPE_INSERT:
                encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
                MailDriver(
                    mail_disp_name        = rq['mail_disp_name'],
                    protocol              = rq['protocol'],
                    smtp_server           = rq['smtp_server'],
                    port                  = rq['port'],
                    user                  = rq['user'],
                    password              = encrypted_password,
                    last_update_user      = request.user.user_name,
                    last_update_timestamp = now
                ).save(force_insert=True)

        except MailDriver.DoesNotExist:
            # rq['mail_driver_id'] is logged here; the bare name mail_driver_id
            # is not defined in this scope
            logger.logic_log('LOSM07006', "mail_driver_id", rq['mail_driver_id'], request=request)

        except Exception as e:
            logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
            response = {
                'status': 'failure',
                'error_msg': error_msg,  # error details (shown via the error icon)
            }

        logger.logic_log('LOSI00002', 'response=%s' % response, request=request)

        return response
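    # For reference, modify() expects json_str['json_str'] to carry the keys
    # accessed above; the concrete shape below is inferred from that usage and
    # is an assumption, not a documented contract:
    #   {'ope': <int opcode>, 'mail_driver_id': <id>, 'mail_disp_name': str,
    #    'protocol': str, 'smtp_server': str, 'port': str, 'user': str,
    #    'password': str}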
CONDITIONS OF", "rq['user'], password = <PASSWORD>, last_update_user = request.user.user_name, last_update_timestamp = now", "False emo_flag_hostname = False if len(rq['mail_disp_name']) == 0: error_flag =", "'user', 64, rq['user'], request=request) # 絵文字チェック value_list = emo_chk.is_emotion(rq['user']) if", "return self.icon_name @classmethod def get_template_file(cls): return 'system/mail/action_mail.html' @classmethod def get_info_list(cls,", "resp_code != 0: error_flag = True #todo 仮でこのエラーは名前に入れている error_msg['mail_disp_name'] +=", "Unless required by applicable law or agreed to in writing,", "True #todo 仮でこのエラーは名前に入れている error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07005',", "'protocol' : '', 'smtp_server' : '', 'port' : '', 'user'", "request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07003', 'port', rq['port'], request=request) if len(rq['user']) >", "64, rq['mail_disp_name'], request=request) # 絵文字チェック value_list = emo_chk.is_emotion(rq['mail_disp_name']) if len(value_list)", "# 絵文字チェック value_list = emo_chk.is_emotion(rq['password']) if len(value_list) > 0: error_flag", "True error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'protocol', request=request)", "get_message('MOSJA27213', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request) if", "raise UserWarning('validation error.') # パスワードを暗号化 空なら空文字 cipher = AESCipher(settings.AES_KEY) if", "protocol_dict = cls.get_define()['dict'] mail_driver_dto_list = [] cipher = AESCipher(settings.AES_KEY) for", "the specific language governing permissions and # limitations under the", "グループのDB更新処理 \"\"\" logger.logic_log('LOSI00001', 'None', request=request) error_flag = False error_msg =", "True error_msg['mail_disp_name'] += get_message('MOSJA27202', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'mail_disp_name', 64,", "from django.http import HttpResponse from django.http import HttpResponseServerError from django.db", "'' driver_info_reg = MailDriver( mail_disp_name = rq['mail_disp_name'], protocol = rq['protocol'],", "applicable law or agreed to in writing, software # distributed", "request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'protocol', request=request) if len(rq['protocol']) > 64:", "logger.user_log('LOSM07001', 'port', request=request) try: tmp_port = int(rq['port']) if 0 >", "self.drv_id def get_icon_name(self): return self.icon_name @classmethod def get_template_file(cls): return 'system/mail/action_mail.html'", "MailDriver.objects.filter(pk=rq['mail_driver_id']).delete() elif ope == defs.DABASE_OPECODE.OPE_INSERT: encrypted_password = <PASSWORD>.encrypt(rq['password']) if rq['password']", "= MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name']) if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id:", "= cls.get_define()['dict'] mail_driver_dto_list = [] cipher = AESCipher(settings.AES_KEY) for mail_obj", "logger.user_log('LOSM07003', 'port', rq['port'], request=request) except ValueError: error_flag = True error_msg['port']", "request=request) if len(rq['user']) > 64: error_flag = True error_msg['user'] +=", "mailDriverInfo(): def __init__(self, drv_id, act_id, name, ver, icon_name): self.drv_id =", ": '', 'password' : '', } now = datetime.datetime.now(pytz.timezone('UTC')) emo_chk", "'mail_disp_name', request=request) if len(rq['mail_disp_name']) 
> 64: error_flag = True error_msg['mail_disp_name']", "+ '\\n' if len(rq['protocol']) == 0: error_flag = True error_msg['protocol']", "request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07003', 'port', rq['port'], request=request) except ValueError: error_flag", "in writing, software # distributed under the License is distributed", "ope == defs.DABASE_OPECODE.OPE_DELETE: MailDriver.objects.filter(pk=rq['mail_driver_id']).delete() elif ope == defs.DABASE_OPECODE.OPE_INSERT: encrypted_password =", "= UnicodeCheck() # 成功時データ response = {\"status\": \"success\",} try: rq", "web_app.serializers.unicode_check import UnicodeCheck logger = OaseLogger.get_instance() # ロガー初期化 class mailDriverInfo():", "+ '\\n' logger.user_log('LOSM07002', 'mail_disp_name', 64, rq['mail_disp_name'], request=request) # 絵文字チェック value_list", "+= get_message('MOSJA27205', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'port', request=request) try: tmp_port", "self.ver) def get_driver_id(self): return self.drv_id def get_icon_name(self): return self.icon_name @classmethod", "error_flag = True error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\\n'", "= <PASSWORD> driver_info_mod.last_update_user = request.user.user_name driver_info_mod.last_update_timestamp = now driver_info_mod.save(force_update=True) elif", "0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) +", "get_message('<PASSWORD>', request.user.get_lang_mode(), showMsgId=False) + '\\n' if not emo_flag: duplication =", "as e: pass if resp_code != 0: error_flag = True", "MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name']) if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id: error_flag", "class mailDriverInfo(): def __init__(self, drv_id, act_id, name, ver, icon_name): self.drv_id", "return '%s Driver ver%s' % (self.name, self.ver) def get_driver_id(self): return", "logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request) def modify(self, json_str, request):", "= {\"status\": \"success\",} try: rq = json_str['json_str'] ope = int(rq['ope'])", "if 0 > tmp_port or tmp_port > 65535: error_flag =", "リクエストされた入力データ error_msg: dict [戻り値] \"\"\" logger.logic_log('LOSI00001', 'data: %s, error_msg:%s'%(rq, error_msg))", "try: tmp_port = int(rq['port']) if 0 > tmp_port or tmp_port", "0: error_flag = True #todo 仮でこのエラーは名前に入れている error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode())", "True error_msg['smtp_server'] += get_message('MOSJA27204', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'smtp_server', 64,", "0: error_flag = True error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\\n'", "logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request) # 絵文字チェック value_list = emo_chk.is_emotion(rq['password'])", "return '%s(ver%s)' % (self.name, self.ver) def get_driver_name(self): return '%s Driver", "emo_flag_hostname = False if len(rq['mail_disp_name']) == 0: error_flag = True", "Exception as e: # ここでの例外は大外で拾う raise protocol_dict = cls.get_define()['dict'] mail_driver_dto_list", "self.icon_name @classmethod def get_template_file(cls): return 'system/mail/action_mail.html' @classmethod def get_info_list(cls, user_groups):", "rq['port'], request=request) except ValueError: error_flag = True error_msg['port'] += get_message('MOSJA27206',", "act_id self.name = 
name self.ver = ver self.icon_name = icon_name", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "[メソッド概要] グループのDB更新処理 \"\"\" logger.logic_log('LOSI00001', 'None', request=request) error_flag = False error_msg", "License, Version 2.0 (the \"License\"); # you may not use", "+ '\\n' logger.user_log('LOSM07001', 'smtp_server', request=request) if len(rq['smtp_server']) > 128: error_flag", "request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request) if error_flag ==", "except ValueError: error_flag = True error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) +", "# You may obtain a copy of the License at", "# 成功時データ response = {\"status\": \"success\",} try: rq = json_str['json_str']", "} return defines def record_lock(self, json_str, request): logger.logic_log('LOSI00001', 'None', request=request)", "HttpResponse from django.http import HttpResponseServerError from django.db import transaction from", "mail_driver_obj_list: mail_info = mail_obj.__dict__ if mail_obj.password: mail_info['password'] = <PASSWORD>.decrypt(mail_obj.password) mail_info['protocol_str']", "= rq['protocol'], smtp_server = rq['smtp_server'], port = rq['port'], user =", "get_message('MOSJA27206', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07003', 'port', rq['port'], request=request) except ValueError:", "len(value_list) > 0: error_flag = True error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(),", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "rq['mail_disp_name'], request=request) if error_flag == False: # 疎通確認 resp_code =", "+ '\\n' logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request) if len(rq['smtp_server']) ==", "web_app.templatetags.common import get_message from web_app.serializers.unicode_check import UnicodeCheck logger = OaseLogger.get_instance()", "0: error_flag = True error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode(), showMsgId=False) +", "get_message('MOSJA27205', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'port', request=request) try: tmp_port =", "{ 'list_all': defs.SMTP_PROTOCOL.LIST_ALL, 'dict': protocol_dict, } return defines def record_lock(self,", "e: # ここでの例外は大外で拾う raise protocol_dict = cls.get_define()['dict'] mail_driver_dto_list = []", "self.ver = ver self.icon_name = icon_name def __str__(self): return '%s(ver%s)'", "record_lock(self, json_str, request): logger.logic_log('LOSI00001', 'None', request=request) driver_id = self.get_driver_id() #", "= False emo_flag_hostname = False if len(rq['mail_disp_name']) == 0: error_flag", "% (self.name, self.ver) def get_driver_id(self): return self.drv_id def get_icon_name(self): return", "'', 'password' : '', } now = datetime.datetime.now(pytz.timezone('UTC')) emo_chk =", "error_msg)) error_flag = False emo_chk = UnicodeCheck() emo_flag = False", "len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id: error_flag = True", "= { 'list_all': defs.SMTP_PROTOCOL.LIST_ALL, 'dict': protocol_dict, } return defines def", "'\\n' if len(rq['port']) == 0: error_flag = True error_msg['port'] +=", "request=request) # 絵文字チェック value_list = emo_chk.is_emotion(rq['user']) if len(value_list) > 0:", "'\\n' logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request) if error_flag == False: #", "the License for the specific language governing permissions and #", 
").save(force_insert=True) except MailDriver.DoesNotExist: logger.logic_log('LOSM07006', \"mail_driver_id\", mail_driver_id, request=request) except Exception as", "rq, error_msg, request): \"\"\" [概要] 入力チェック [引数] rq: dict リクエストされた入力データ", "request=request) # 絵文字チェック value_list = emo_chk.is_emotion(rq['smtp_server']) if len(value_list) > 0:", "Apache License, Version 2.0 (the \"License\"); # you may not", "65535: error_flag = True error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\\n'", "len(rq['password']) > 64: error_flag = True error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode())", "either express or implied. # See the License for the", "def get_driver_id(self): return self.drv_id def get_icon_name(self): return self.icon_name @classmethod def", "defines = { 'list_all': defs.SMTP_PROTOCOL.LIST_ALL, 'dict': protocol_dict, } return defines", "= ver self.icon_name = icon_name def __str__(self): return '%s(ver%s)' %", "64, rq['password'], request=request) # 絵文字チェック value_list = emo_chk.is_emotion(rq['password']) if len(value_list)", "defs.DABASE_OPECODE.OPE_DELETE): drvinfo_modify = int(json_str['json_str']['mail_driver_id']) MailDriver.objects.select_for_update().filter(pk=drvinfo_modify) logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id,", "<gh_stars>1-10 # Copyright 2019 NEC Corporation # # Licensed under", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "if len(rq['smtp_server']) == 0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27203',", "error_flag = True error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\\n'", "protocol = rq['protocol'], smtp_server = rq['smtp_server'], port = rq['port'], user", "if error_flag == False: # 疎通確認 resp_code = -1 try:", "'None', request=request) driver_id = self.get_driver_id() # 更新前にレコードロック if json_str['json_str']['ope'] in", "ope = int(rq['ope']) #削除以外の場合の入力チェック if ope != defs.DABASE_OPECODE.OPE_DELETE: error_flag =", "traceback.format_exc(), request=request) response = { 'status': 'failure', 'error_msg': error_msg, #", "AESCipher from web_app.models.models import ActionType from web_app.models.mail_models import MailDriver from", "from web_app.templatetags.common import get_message from web_app.serializers.unicode_check import UnicodeCheck logger =", "= emo_chk.is_emotion(rq['mail_disp_name']) if len(value_list) > 0: error_flag = True emo_flag", "from libs.commonlibs import define as defs from libs.commonlibs.oase_logger import OaseLogger", "0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\\n'", "len(rq['user']) > 64: error_flag = True error_msg['user'] += get_message('MOSJA27207', request.user.get_lang_mode())", "> 0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False)", "import define as defs from libs.commonlibs.oase_logger import OaseLogger from libs.commonlibs.aes_cipher", "= OaseLogger.get_instance() # ロガー初期化 class mailDriverInfo(): def __init__(self, drv_id, act_id,", "if len(rq['mail_disp_name']) > 64: error_flag = True error_msg['mail_disp_name'] += get_message('MOSJA27202',", "+= get_message('<PASSWORD>', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request)", "= request.user.user_name, last_update_timestamp = now ).save(force_insert=True) except 
MailDriver.DoesNotExist: logger.logic_log('LOSM07006', \"mail_driver_id\",", "= False emo_flag_ita_disp_name = False emo_flag_hostname = False if len(rq['mail_disp_name'])", "'\\n' logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request) if len(rq['smtp_server']) == 0:", "2019 NEC Corporation # # Licensed under the Apache License,", "except Exception as e: # ここでの例外は大外で拾う raise protocol_dict = cls.get_define()['dict']", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "transaction from django.conf import settings from libs.commonlibs import define as", "<PASSWORD>.decrypt(mail_obj.password) mail_info['protocol_str'] = protocol_dict[mail_obj.protocol] mail_driver_dto_list.append(mail_info) return mail_driver_dto_list @classmethod def get_group_list(cls,", "def modify(self, json_str, request): \"\"\" [メソッド概要] グループのDB更新処理 \"\"\" logger.logic_log('LOSI00001', 'None',", "'response=%s' % response, request=request) return response def _validate(self, rq, error_msg,", "rq['port'], request=request) if len(rq['user']) > 64: error_flag = True error_msg['user']", "limitations under the License. # \"\"\" [概要] MAILアクション用画面表示補助クラス \"\"\" import", "= MailDriver.objects.get(mail_driver_id=rq['mail_driver_id']) driver_info_mod.mail_disp_name = rq['mail_disp_name'] driver_info_mod.protocol = rq['protocol'] driver_info_mod.smtp_server =", "driver_info_mod.last_update_timestamp = now driver_info_mod.save(force_update=True) elif ope == defs.DABASE_OPECODE.OPE_DELETE: MailDriver.objects.filter(pk=rq['mail_driver_id']).delete() elif", "driver_id = self.get_driver_id() # 更新前にレコードロック if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):", "= protocol_dict[mail_obj.protocol] mail_driver_dto_list.append(mail_info) return mail_driver_dto_list @classmethod def get_group_list(cls, user_groups): \"\"\"", "if len(value_list) > 0: error_flag = True error_msg['user'] += get_message('MOSJA27218',", "error_msg['mail_disp_name'] += get_message('MOSJA27201', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'mail_disp_name', request=request) if", "driver_info_mod.port = rq['port'] driver_info_mod.user = rq['user'] driver_info_mod.password = <PASSWORD> driver_info_mod.last_update_user", "return response def _validate(self, rq, error_msg, request): \"\"\" [概要] 入力チェック", "len(rq['smtp_server']) == 0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode())", "= rq['smtp_server'], port = rq['port'], user = rq['user'], password =", "False emo_flag_ita_disp_name = False emo_flag_hostname = False if len(rq['mail_disp_name']) ==", "= int(rq['ope']) #削除以外の場合の入力チェック if ope != defs.DABASE_OPECODE.OPE_DELETE: error_flag = self._validate(rq,", "% response, request=request) return response def _validate(self, rq, error_msg, request):", "name self.ver = ver self.icon_name = icon_name def __str__(self): return", "socket.SOCK_STREAM) as sock: resp_code = sock.connect_ex((rq['smtp_server'], int(rq['port']))) # host名名前解決が必要/etc/hostsとか sock.close()", "'port', request=request) try: tmp_port = int(rq['port']) if 0 > tmp_port", "request=request) if len(rq['mail_disp_name']) > 64: error_flag = True error_msg['mail_disp_name'] +=", "error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\\n' if len(rq['password']) >", "defs.SMTP_PROTOCOL.LIST_ALL} defines = { 'list_all': defs.SMTP_PROTOCOL.LIST_ALL, 'dict': protocol_dict, } return", "emo_flag = False 
emo_flag_ita_disp_name = False emo_flag_hostname = False if", "logger.user_log('LOSM07001', 'mail_disp_name', request=request) if len(rq['mail_disp_name']) > 64: error_flag = True", "= int(rq['port']) if 0 > tmp_port or tmp_port > 65535:", "\"License\"); # you may not use this file except in", "datetime.datetime.now(pytz.timezone('UTC')) emo_chk = UnicodeCheck() # 成功時データ response = {\"status\": \"success\",}", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "更新前にレコードロック if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE): drvinfo_modify = int(json_str['json_str']['mail_driver_id']) MailDriver.objects.select_for_update().filter(pk=drvinfo_modify)", "<PASSWORD>.encrypt(rq['password']) if rq['password'] else '' driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id']) driver_info_mod.mail_disp_name =", "+= get_message('MOSJA27213', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request)", "絵文字チェック value_list = emo_chk.is_emotion(rq['user']) if len(value_list) > 0: error_flag =", "= datetime.datetime.now(pytz.timezone('UTC')) emo_chk = UnicodeCheck() # 成功時データ response = {\"status\":", "logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request) if error_flag == False: # 疎通確認", "MailDriver.objects.get(mail_driver_id=rq['mail_driver_id']) driver_info_mod.mail_disp_name = rq['mail_disp_name'] driver_info_mod.protocol = rq['protocol'] driver_info_mod.smtp_server = rq['smtp_server']", "# 絵文字チェック value_list = emo_chk.is_emotion(rq['mail_disp_name']) if len(value_list) > 0: error_flag", "get_message('MOSJA27212', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'protocol', request=request) if len(rq['protocol']) >", "# distributed under the License is distributed on an \"AS", "if len(rq['mail_disp_name']) == 0: error_flag = True error_msg['mail_disp_name'] += get_message('MOSJA27201',", "'', 'user' : '', 'password' : '', } now =", "+= get_message('MOSJA27206', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07003', 'port', rq['port'], request=request) if", "True error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'password', 64,", "'data: %s, error_msg:%s'%(rq, error_msg)) error_flag = False emo_chk = UnicodeCheck()", "# Unless required by applicable law or agreed to in", "AESCipher(settings.AES_KEY) for mail_obj in mail_driver_obj_list: mail_info = mail_obj.__dict__ if mail_obj.password:", "encrypted_password = <PASSWORD>.encrypt(rq['password']) if rq['password'] else '' driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "'\\n' logger.user_log('LOSM07001', 'mail_disp_name', request=request) if len(rq['mail_disp_name']) > 64: error_flag =", "'dict': protocol_dict, } return defines def record_lock(self, json_str, request): logger.logic_log('LOSI00001',", "<PASSWORD>.encrypt(rq['password']) if rq['password'] else '' driver_info_reg = MailDriver( mail_disp_name =", "request): logger.logic_log('LOSI00001', 'None', request=request) driver_id = self.get_driver_id() # 更新前にレコードロック if", "django.conf import settings from libs.commonlibs import define as defs from", "You may obtain a copy of the License at #", "get_message('<PASSWORD>', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request) #", "now 
driver_info_mod.save(force_update=True) elif ope == defs.DABASE_OPECODE.OPE_DELETE: MailDriver.objects.filter(pk=rq['mail_driver_id']).delete() elif ope ==", "= <PASSWORD>.decrypt(mail_obj.password) mail_info['protocol_str'] = protocol_dict[mail_obj.protocol] mail_driver_dto_list.append(mail_info) return mail_driver_dto_list @classmethod def", "> 0: error_flag = True error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode(), showMsgId=False)", "duplication = MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name']) if len(duplication) == 1 and int(rq['mail_driver_id']) !=", "error_msg, # エラー詳細(エラーアイコンで出す) } logger.logic_log('LOSI00002', 'response=%s' % response, request=request) return", "Corporation # # Licensed under the Apache License, Version 2.0", "if len(value_list) > 0: error_flag = True error_msg['smtp_server'] += get_message('MOSJA27217',", "True error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'smtp_server', request=request)", "driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id']) driver_info_mod.mail_disp_name = rq['mail_disp_name'] driver_info_mod.protocol = rq['protocol'] driver_info_mod.smtp_server", "= False error_msg = { 'mail_disp_name' : '', 'protocol' :", "AESCipher(settings.AES_KEY) if ope == defs.DABASE_OPECODE.OPE_UPDATE: encrypted_password = <PASSWORD>.encrypt(rq['password']) if rq['password']", "logger.user_log('LOSM07003', 'port', rq['port'], request=request) if len(rq['user']) > 64: error_flag =", "the Apache License, Version 2.0 (the \"License\"); # you may", "ActionType from web_app.models.mail_models import MailDriver from web_app.templatetags.common import get_message from", "request.user.get_lang_mode(), showMsgId=False) + '\\n' if len(rq['password']) > 64: error_flag =", "for mail_obj in mail_driver_obj_list: mail_info = mail_obj.__dict__ if mail_obj.password: mail_info['password']", "error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07005', rq['smtp_server'], rq['port'], request=request)", "% driver_id, request=request) def modify(self, json_str, request): \"\"\" [メソッド概要] グループのDB更新処理", "error_flag = True error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07002',", "+= get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\\n' if len(rq['port']) == 0:", "request.user.get_lang_mode()) + '\\n' logger.user_log('LOSM07001', 'smtp_server', request=request) if len(rq['smtp_server']) > 128:", "error_flag = True error_msg['password'] += get_message('<PASSWORD>', request.user.get_lang_mode(), showMsgId=False) + '\\n'" ]
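The list above is built from shingles of a Django-style mail-driver screen helper whose _validate method accumulates one error message per field rather than stopping at the first failure. Below is a minimal, self-contained sketch of that accumulate-all-errors pattern; the field names and the 64-character, 128-character, and 0-65535 port limits are taken from the fragments themselves, while the function name validate, the plain-English messages, and the dict return shape are illustrative stand-ins for the original's error_flag/error_msg plumbing and its MOSJA* message IDs, not its actual API.

def validate(rq: dict) -> dict:
    # One message slot per field; every failed check appends a message
    # instead of raising, so the caller sees all problems in one pass
    # (mirroring the error_flag / error_msg idiom in the fragments).
    error_msg = {k: '' for k in
                 ('mail_disp_name', 'smtp_server', 'port', 'user', 'password')}

    if len(rq.get('mail_disp_name', '')) == 0:
        error_msg['mail_disp_name'] += 'display name is required\n'
    elif len(rq['mail_disp_name']) > 64:
        error_msg['mail_disp_name'] += 'display name exceeds 64 characters\n'

    if len(rq.get('smtp_server', '')) == 0:
        error_msg['smtp_server'] += 'SMTP server is required\n'
    elif len(rq['smtp_server']) > 128:
        error_msg['smtp_server'] += 'SMTP server exceeds 128 characters\n'

    try:
        port = int(rq.get('port', ''))
    except ValueError:
        port = -1
    if port < 0 or port > 65535:
        error_msg['port'] += 'port must be an integer between 0 and 65535\n'

    if len(rq.get('user', '')) > 64:
        error_msg['user'] += 'user exceeds 64 characters\n'
    if len(rq.get('password', '')) > 64:
        error_msg['password'] += 'password exceeds 64 characters\n'

    # Report only the fields that actually failed.
    return {k: v for k, v in error_msg.items() if v}

For example, validate({'mail_disp_name': '', 'smtp_server': 'mail', 'port': '99999'}) returns messages for both mail_disp_name and port in a single call.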
[ "-> Dog: return self.dogs.remove() def dequeCat(self) -> Cat: return self.cats.remove()", "= self.tail.next_node def remove(self) -> Any: if self.head is None:", "None def __str__(self) -> str: current = self.head string =", "__str__(self) -> str: return f\"{self.name}\" class Node: def __init__(self, data:", "for animal in both] string = \"\" for anim in", "def peak(self): return self.head.data class Dog(Animal): def __init__(self, name: str):", "is None: raise (\"Empty LinkedList!\") else: data = self.head.data self.head", "self.dogs.remove() def dequeCat(self) -> Cat: return self.cats.remove() def main(): q", "self.dogs.is_empty(): return self.dequeCat() elif self.cats.is_empty(): return self.dequeDog() if self.dogs.head.data.peek_order() >", "f\"{current.data} -> \" current = current.next_node return string + \"END\"", "[]: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for animal in both] string =", "\"END\" def is_empty(self) -> bool: if self.head is None: return", "self.dequeCat() elif self.cats.is_empty(): return self.dequeDog() if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return", "__str__(self) -> str: current = self.head string = f\"\" while", "not None: string += f\"{cat.data.name} {cat.data.peek_order()} | \" cat =", "= q.dequeDog() print(get.order,get.name) get = q.dequeAny() print(get.order,get.name) if __name__ ==", "[Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats = [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both =", "= LinkedList() self.cats = LinkedList() self.order = 0 def enqueue(self,", "# print(q.print_cats()) get = q.dequeDog() print(get.order,get.name) get = q.dequeAny() print(get.order,get.name)", "def main(): q = AnimalQueue() dogs = [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")]", "self.cats = LinkedList() self.order = 0 def enqueue(self, animal: Union[Dog,", "not None: string += f\"{current.data} -> \" current = current.next_node", "| \" cat = cat.next_node return string def dequeDog(self) ->", "animal in both] string = \"\" for anim in both:", "Node: def __init__(self, data: Any): self.data = data self.next_node =", "if not isinstance(animal, (Dog, Cat)): raise Exception(\"Expected Dog or Cat!\")", "\"\" cat = self.cats.head while cat is not None: string", "= \"\" cat = self.cats.head while cat is not None:", "self.cats.remove() def main(): q = AnimalQueue() dogs = [Dog(\"d1\"), Dog(\"d2\"),", "def insert(self, item: Any) -> None: if self.is_empty(): self.head =", "-> Any: if self.head is None: raise (\"Empty LinkedList!\") else:", "(\"Empty LinkedList!\") else: data = self.head.data self.head = self.head.next_node return", "is not None: string += f\"{cat.data.name} {cat.data.peek_order()} | \" cat", "return self.dequeDog() def print_cats(self) -> str: string = \"\" cat", "Cat(\"c2\"), Cat(\"c3\")] both = [] while cats != []: both.append(cats.pop())", "get = q.dequeDog() print(get.order,get.name) get = q.dequeAny() print(get.order,get.name) if __name__", "order def peek_order(self) -> int: return self.order def __str__(self) ->", "Animal: def __init__(self, name: str) -> None: self.name = name", "= AnimalQueue() dogs = [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats = [Cat(\"c1\"),", "= order def peek_order(self) -> int: return self.order def __str__(self)", "cat = self.cats.head while cat is not None: string +=", "{cat.data.peek_order()} | \" cat = cat.next_node return string def dequeDog(self)", "current.next_node return string + \"END\" def is_empty(self) -> bool: if", "print(q.print_cats()) get = q.dequeDog() 
print(get.order,get.name) get = q.dequeAny() print(get.order,get.name) if", "is_empty(self) -> bool: if self.head is None: return True else:", "__init__(self, name: str): super().__init__(name) class AnimalQueue: def __init__(self) -> None:", "both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for animal in both] string = \"\"", "def peek_order(self) -> int: return self.order def __str__(self) -> str:", "def __init__(self, name: str): super().__init__(name) class Cat(Animal): def __init__(self, name:", "current = current.next_node return string + \"END\" def is_empty(self) ->", "string = \"\" cat = self.cats.head while cat is not", "class Dog(Animal): def __init__(self, name: str): super().__init__(name) class Cat(Animal): def", "Node(item) self.tail.next_node = new_node self.tail = self.tail.next_node def remove(self) ->", "def dequeCat(self) -> Cat: return self.cats.remove() def main(): q =", "self.dequeDog() if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return self.dequeCat() else: return self.dequeDog()", "dequeAny(self) -> Union[Dog, Cat]: if self.dogs.is_empty(): return self.dequeCat() elif self.cats.is_empty():", "string def dequeDog(self) -> Dog: return self.dogs.remove() def dequeCat(self) ->", "peak(self): return self.head.data class Dog(Animal): def __init__(self, name: str): super().__init__(name)", "= f\"\" while current.next_node is not None: string += f\"{current.data}", "-> str: string = \"\" cat = self.cats.head while cat", "insert(self, item: Any) -> None: if self.is_empty(): self.head = Node(item)", "return self.dogs.remove() def dequeCat(self) -> Cat: return self.cats.remove() def main():", "-> Cat: return self.cats.remove() def main(): q = AnimalQueue() dogs", "= self.head.data self.head = self.head.next_node return data def peak(self): return", "class Animal: def __init__(self, name: str) -> None: self.name =", "def __init__(self) -> None: self.dogs = LinkedList() self.cats = LinkedList()", "self.tail.next_node def remove(self) -> Any: if self.head is None: raise", "string = \"\" for anim in both: string += f\"{anim.name}", "self.tail.next_node = new_node self.tail = self.tail.next_node def remove(self) -> Any:", "-> None: if not isinstance(animal, (Dog, Cat)): raise Exception(\"Expected Dog", "= name def set_order(self, order: int) -> None: self.order =", "class LinkedList: def __init__(self) -> None: self.head = None self.tail", "self.head.next_node return data def peak(self): return self.head.data class Dog(Animal): def", "None: string += f\"{current.data} -> \" current = current.next_node return", "self.is_empty(): self.head = Node(item) self.tail = self.head else: new_node =", "self.dequeCat() else: return self.dequeDog() def print_cats(self) -> str: string =", "\" current = current.next_node return string + \"END\" def is_empty(self)", "!= []: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for animal in both] string", "class AnimalQueue: def __init__(self) -> None: self.dogs = LinkedList() self.cats", "Dog(Animal): def __init__(self, name: str): super().__init__(name) class Cat(Animal): def __init__(self,", "else: return False def insert(self, item: Any) -> None: if", "Exception(\"Expected Dog or Cat!\") else: animal.set_order(self.order) self.order += 1 if", "Any: if self.head is None: raise (\"Empty LinkedList!\") else: data", "isinstance(animal, Cat): self.cats.insert(animal) def dequeAny(self) -> Union[Dog, Cat]: if self.dogs.is_empty():", "Cat]) -> None: if not isinstance(animal, (Dog, Cat)): 
raise Exception(\"Expected", "self.order = order def peek_order(self) -> int: return self.order def", "in both] string = \"\" for anim in both: string", "self.cats.head while cat is not None: string += f\"{cat.data.name} {cat.data.peek_order()}", "animal.set_order(self.order) self.order += 1 if isinstance(animal, Dog): self.dogs.insert(animal) elif isinstance(animal,", "= [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both = [] while cats !=", "class Cat(Animal): def __init__(self, name: str): super().__init__(name) class AnimalQueue: def", "int: return self.order def __str__(self) -> str: return f\"{self.name}\" class", "None: self.name = name def set_order(self, order: int) -> None:", "str) -> None: self.name = name def set_order(self, order: int)", "Union[Dog, Cat]: if self.dogs.is_empty(): return self.dequeCat() elif self.cats.is_empty(): return self.dequeDog()", "return False def insert(self, item: Any) -> None: if self.is_empty():", "str): super().__init__(name) class Cat(Animal): def __init__(self, name: str): super().__init__(name) class", "0 def enqueue(self, animal: Union[Dog, Cat]) -> None: if not", "new_node self.tail = self.tail.next_node def remove(self) -> Any: if self.head", "__init__(self) -> None: self.head = None self.tail = None def", "self.tail = self.tail.next_node def remove(self) -> Any: if self.head is", "is None: return True else: return False def insert(self, item:", "= self.head string = f\"\" while current.next_node is not None:", "+= f\"{current.data} -> \" current = current.next_node return string +", "= LinkedList() self.order = 0 def enqueue(self, animal: Union[Dog, Cat])", "return string def dequeDog(self) -> Dog: return self.dogs.remove() def dequeCat(self)", "-> Union[Dog, Cat]: if self.dogs.is_empty(): return self.dequeCat() elif self.cats.is_empty(): return", "None: return True else: return False def insert(self, item: Any)", "return self.dequeCat() else: return self.dequeDog() def print_cats(self) -> str: string", "self.head string = f\"\" while current.next_node is not None: string", "-> None: self.head = None self.tail = None def __str__(self)", "both] string = \"\" for anim in both: string +=", "dequeDog(self) -> Dog: return self.dogs.remove() def dequeCat(self) -> Cat: return", "data: Any): self.data = data self.next_node = None class LinkedList:", "= Node(item) self.tail.next_node = new_node self.tail = self.tail.next_node def remove(self)", "current.next_node is not None: string += f\"{current.data} -> \" current", "[Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both = [] while cats != []:", "both = [] while cats != []: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal)", "__init__(self, name: str): super().__init__(name) class Cat(Animal): def __init__(self, name: str):", "= cat.next_node return string def dequeDog(self) -> Dog: return self.dogs.remove()", "else: new_node = Node(item) self.tail.next_node = new_node self.tail = self.tail.next_node", "Node(item) self.tail = self.head else: new_node = Node(item) self.tail.next_node =", "def print_cats(self) -> str: string = \"\" cat = self.cats.head", "= current.next_node return string + \"END\" def is_empty(self) -> bool:", "dogs = [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats = [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")]", "None: self.head = None self.tail = None def __str__(self) ->", "= None class LinkedList: def __init__(self) -> None: self.head =", "False def insert(self, item: Any) -> None: if self.is_empty(): self.head", "cats = [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both = [] while cats", 
"both: string += f\"{anim.name} {anim.order} | \" print(string) # print(q.print_cats())", "LinkedList!\") else: data = self.head.data self.head = self.head.next_node return data", "not isinstance(animal, (Dog, Cat)): raise Exception(\"Expected Dog or Cat!\") else:", "is not None: string += f\"{current.data} -> \" current =", "cats != []: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for animal in both]", "-> \" current = current.next_node return string + \"END\" def", "self.head = self.head.next_node return data def peak(self): return self.head.data class", "if self.is_empty(): self.head = Node(item) self.tail = self.head else: new_node", "-> int: return self.order def __str__(self) -> str: return f\"{self.name}\"", "string += f\"{cat.data.name} {cat.data.peek_order()} | \" cat = cat.next_node return", "self.order def __str__(self) -> str: return f\"{self.name}\" class Node: def", "bool: if self.head is None: return True else: return False", "super().__init__(name) class Cat(Animal): def __init__(self, name: str): super().__init__(name) class AnimalQueue:", "self.cats.insert(animal) def dequeAny(self) -> Union[Dog, Cat]: if self.dogs.is_empty(): return self.dequeCat()", "name: str): super().__init__(name) class Cat(Animal): def __init__(self, name: str): super().__init__(name)", "__init__(self, data: Any): self.data = data self.next_node = None class", "str: return f\"{self.name}\" class Node: def __init__(self, data: Any): self.data", "if self.head is None: return True else: return False def", "None: self.order = order def peek_order(self) -> int: return self.order", "f\"\" while current.next_node is not None: string += f\"{current.data} ->", "\"\" for anim in both: string += f\"{anim.name} {anim.order} |", "__init__(self, name: str) -> None: self.name = name def set_order(self,", "AnimalQueue: def __init__(self) -> None: self.dogs = LinkedList() self.cats =", "int) -> None: self.order = order def peek_order(self) -> int:", "if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return self.dequeCat() else: return self.dequeDog() def", "Cat]: if self.dogs.is_empty(): return self.dequeCat() elif self.cats.is_empty(): return self.dequeDog() if", "isinstance(animal, (Dog, Cat)): raise Exception(\"Expected Dog or Cat!\") else: animal.set_order(self.order)", "1 if isinstance(animal, Dog): self.dogs.insert(animal) elif isinstance(animal, Cat): self.cats.insert(animal) def", "print(string) # print(q.print_cats()) get = q.dequeDog() print(get.order,get.name) get = q.dequeAny()", "Any) -> None: if self.is_empty(): self.head = Node(item) self.tail =", "f\"{cat.data.name} {cat.data.peek_order()} | \" cat = cat.next_node return string def", "if self.dogs.is_empty(): return self.dequeCat() elif self.cats.is_empty(): return self.dequeDog() if self.dogs.head.data.peek_order()", "or Cat!\") else: animal.set_order(self.order) self.order += 1 if isinstance(animal, Dog):", "def is_empty(self) -> bool: if self.head is None: return True", "else: return self.dequeDog() def print_cats(self) -> str: string = \"\"", "Cat)): raise Exception(\"Expected Dog or Cat!\") else: animal.set_order(self.order) self.order +=", "self.head is None: raise (\"Empty LinkedList!\") else: data = self.head.data", "isinstance(animal, Dog): self.dogs.insert(animal) elif isinstance(animal, Cat): self.cats.insert(animal) def dequeAny(self) ->", "self.dequeDog() def print_cats(self) -> str: string = \"\" cat =", "from typing import Any, Union class Animal: def __init__(self, name:", "def 
__init__(self, name: str) -> None: self.name = name def", "-> None: if self.is_empty(): self.head = Node(item) self.tail = self.head", "raise Exception(\"Expected Dog or Cat!\") else: animal.set_order(self.order) self.order += 1", "self.head is None: return True else: return False def insert(self,", "main(): q = AnimalQueue() dogs = [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats", "return data def peak(self): return self.head.data class Dog(Animal): def __init__(self,", "Dog(\"d3\")] cats = [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both = [] while", "string = f\"\" while current.next_node is not None: string +=", "LinkedList() self.cats = LinkedList() self.order = 0 def enqueue(self, animal:", "-> None: self.order = order def peek_order(self) -> int: return", "None: string += f\"{cat.data.name} {cat.data.peek_order()} | \" cat = cat.next_node", "new_node = Node(item) self.tail.next_node = new_node self.tail = self.tail.next_node def", "None: if self.is_empty(): self.head = Node(item) self.tail = self.head else:", "Any, Union class Animal: def __init__(self, name: str) -> None:", "class Node: def __init__(self, data: Any): self.data = data self.next_node", "self.tail = self.head else: new_node = Node(item) self.tail.next_node = new_node", "def __init__(self) -> None: self.head = None self.tail = None", "None: self.dogs = LinkedList() self.cats = LinkedList() self.order = 0", "> self.cats.head.data.peek_order(): return self.dequeCat() else: return self.dequeDog() def print_cats(self) ->", "+ \"END\" def is_empty(self) -> bool: if self.head is None:", "-> bool: if self.head is None: return True else: return", "q.dequeDog() print(get.order,get.name) get = q.dequeAny() print(get.order,get.name) if __name__ == \"__main__\":", "def __init__(self, name: str): super().__init__(name) class AnimalQueue: def __init__(self) ->", "-> None: self.dogs = LinkedList() self.cats = LinkedList() self.order =", "Cat(\"c3\")] both = [] while cats != []: both.append(cats.pop()) both.append(dogs.pop())", "AnimalQueue() dogs = [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats = [Cat(\"c1\"), Cat(\"c2\"),", "[q.enqueue(animal) for animal in both] string = \"\" for anim", "animal: Union[Dog, Cat]) -> None: if not isinstance(animal, (Dog, Cat)):", "in both: string += f\"{anim.name} {anim.order} | \" print(string) #", "current = self.head string = f\"\" while current.next_node is not", "Cat: return self.cats.remove() def main(): q = AnimalQueue() dogs =", "Union class Animal: def __init__(self, name: str) -> None: self.name", "name def set_order(self, order: int) -> None: self.order = order", "self.tail = None def __str__(self) -> str: current = self.head", "return self.dequeDog() if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return self.dequeCat() else: return", "[] while cats != []: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for animal", "def remove(self) -> Any: if self.head is None: raise (\"Empty", "+= f\"{anim.name} {anim.order} | \" print(string) # print(q.print_cats()) get =", "None self.tail = None def __str__(self) -> str: current =", "else: animal.set_order(self.order) self.order += 1 if isinstance(animal, Dog): self.dogs.insert(animal) elif", "name: str): super().__init__(name) class AnimalQueue: def __init__(self) -> None: self.dogs", "= None def __str__(self) -> str: current = self.head string", "self.order += 1 if isinstance(animal, Dog): self.dogs.insert(animal) elif isinstance(animal, Cat):", "Dog or Cat!\") else: animal.set_order(self.order) self.order += 1 if 
isinstance(animal,", "= [] while cats != []: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for", "return f\"{self.name}\" class Node: def __init__(self, data: Any): self.data =", "data self.next_node = None class LinkedList: def __init__(self) -> None:", "\" print(string) # print(q.print_cats()) get = q.dequeDog() print(get.order,get.name) get =", "| \" print(string) # print(q.print_cats()) get = q.dequeDog() print(get.order,get.name) get", "return self.dequeCat() elif self.cats.is_empty(): return self.dequeDog() if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order():", "\" cat = cat.next_node return string def dequeDog(self) -> Dog:", "Cat!\") else: animal.set_order(self.order) self.order += 1 if isinstance(animal, Dog): self.dogs.insert(animal)", "string += f\"{current.data} -> \" current = current.next_node return string", "= Node(item) self.tail = self.head else: new_node = Node(item) self.tail.next_node", "self.head.data class Dog(Animal): def __init__(self, name: str): super().__init__(name) class Cat(Animal):", "while cats != []: both.append(cats.pop()) both.append(dogs.pop()) [q.enqueue(animal) for animal in", "both.append(dogs.pop()) [q.enqueue(animal) for animal in both] string = \"\" for", "self.head else: new_node = Node(item) self.tail.next_node = new_node self.tail =", "str: string = \"\" cat = self.cats.head while cat is", "= [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats = [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both", "True else: return False def insert(self, item: Any) -> None:", "-> str: current = self.head string = f\"\" while current.next_node", "self.order = 0 def enqueue(self, animal: Union[Dog, Cat]) -> None:", "self.head = None self.tail = None def __str__(self) -> str:", "Cat(Animal): def __init__(self, name: str): super().__init__(name) class AnimalQueue: def __init__(self)", "while cat is not None: string += f\"{cat.data.name} {cat.data.peek_order()} |", "enqueue(self, animal: Union[Dog, Cat]) -> None: if not isinstance(animal, (Dog,", "typing import Any, Union class Animal: def __init__(self, name: str)", "= self.head else: new_node = Node(item) self.tail.next_node = new_node self.tail", "self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return self.dequeCat() else: return self.dequeDog() def print_cats(self)", "= data self.next_node = None class LinkedList: def __init__(self) ->", "str: current = self.head string = f\"\" while current.next_node is", "None: if not isinstance(animal, (Dog, Cat)): raise Exception(\"Expected Dog or", "q = AnimalQueue() dogs = [Dog(\"d1\"), Dog(\"d2\"), Dog(\"d3\")] cats =", "#!/usr/bin/env python3 from typing import Any, Union class Animal: def", "LinkedList() self.order = 0 def enqueue(self, animal: Union[Dog, Cat]) ->", "remove(self) -> Any: if self.head is None: raise (\"Empty LinkedList!\")", "f\"{anim.name} {anim.order} | \" print(string) # print(q.print_cats()) get = q.dequeDog()", "while current.next_node is not None: string += f\"{current.data} -> \"", "= 0 def enqueue(self, animal: Union[Dog, Cat]) -> None: if", "str): super().__init__(name) class AnimalQueue: def __init__(self) -> None: self.dogs =", "Union[Dog, Cat]) -> None: if not isinstance(animal, (Dog, Cat)): raise", "peek_order(self) -> int: return self.order def __str__(self) -> str: return", "None: raise (\"Empty LinkedList!\") else: data = self.head.data self.head =", "return True else: return False def insert(self, item: Any) ->", "return self.order def __str__(self) -> str: return f\"{self.name}\" class 
Node:", "import Any, Union class Animal: def __init__(self, name: str) ->", "def set_order(self, order: int) -> None: self.order = order def", "cat = cat.next_node return string def dequeDog(self) -> Dog: return", "Cat): self.cats.insert(animal) def dequeAny(self) -> Union[Dog, Cat]: if self.dogs.is_empty(): return", "= \"\" for anim in both: string += f\"{anim.name} {anim.order}", "raise (\"Empty LinkedList!\") else: data = self.head.data self.head = self.head.next_node", "order: int) -> None: self.order = order def peek_order(self) ->", "cat.next_node return string def dequeDog(self) -> Dog: return self.dogs.remove() def", "Dog): self.dogs.insert(animal) elif isinstance(animal, Cat): self.cats.insert(animal) def dequeAny(self) -> Union[Dog,", "cat is not None: string += f\"{cat.data.name} {cat.data.peek_order()} | \"", "None class LinkedList: def __init__(self) -> None: self.head = None", "Any): self.data = data self.next_node = None class LinkedList: def", "LinkedList: def __init__(self) -> None: self.head = None self.tail =", "self.head.data self.head = self.head.next_node return data def peak(self): return self.head.data", "-> None: self.name = name def set_order(self, order: int) ->", "def dequeAny(self) -> Union[Dog, Cat]: if self.dogs.is_empty(): return self.dequeCat() elif", "if isinstance(animal, Dog): self.dogs.insert(animal) elif isinstance(animal, Cat): self.cats.insert(animal) def dequeAny(self)", "set_order(self, order: int) -> None: self.order = order def peek_order(self)", "self.name = name def set_order(self, order: int) -> None: self.order", "= self.head.next_node return data def peak(self): return self.head.data class Dog(Animal):", "if self.head is None: raise (\"Empty LinkedList!\") else: data =", "print(get.order,get.name) get = q.dequeAny() print(get.order,get.name) if __name__ == \"__main__\": main()", "def __str__(self) -> str: return f\"{self.name}\" class Node: def __init__(self,", "Dog(\"d2\"), Dog(\"d3\")] cats = [Cat(\"c1\"), Cat(\"c2\"), Cat(\"c3\")] both = []", "item: Any) -> None: if self.is_empty(): self.head = Node(item) self.tail", "= self.cats.head while cat is not None: string += f\"{cat.data.name}", "self.next_node = None class LinkedList: def __init__(self) -> None: self.head", "= new_node self.tail = self.tail.next_node def remove(self) -> Any: if", "__init__(self) -> None: self.dogs = LinkedList() self.cats = LinkedList() self.order", "+= 1 if isinstance(animal, Dog): self.dogs.insert(animal) elif isinstance(animal, Cat): self.cats.insert(animal)", "f\"{self.name}\" class Node: def __init__(self, data: Any): self.data = data", "return self.head.data class Dog(Animal): def __init__(self, name: str): super().__init__(name) class", "Dog: return self.dogs.remove() def dequeCat(self) -> Cat: return self.cats.remove() def", "def enqueue(self, animal: Union[Dog, Cat]) -> None: if not isinstance(animal,", "self.data = data self.next_node = None class LinkedList: def __init__(self)", "print_cats(self) -> str: string = \"\" cat = self.cats.head while", "{anim.order} | \" print(string) # print(q.print_cats()) get = q.dequeDog() print(get.order,get.name)", "(Dog, Cat)): raise Exception(\"Expected Dog or Cat!\") else: animal.set_order(self.order) self.order", "-> str: return f\"{self.name}\" class Node: def __init__(self, data: Any):", "self.dogs = LinkedList() self.cats = LinkedList() self.order = 0 def", "def __str__(self) -> str: current = self.head string = f\"\"", "name: str) -> None: self.name = name def set_order(self, order:", "data def peak(self): 
return self.head.data class Dog(Animal): def __init__(self, name:", "return self.cats.remove() def main(): q = AnimalQueue() dogs = [Dog(\"d1\"),", "def __init__(self, data: Any): self.data = data self.next_node = None", "+= f\"{cat.data.name} {cat.data.peek_order()} | \" cat = cat.next_node return string", "string += f\"{anim.name} {anim.order} | \" print(string) # print(q.print_cats()) get", "anim in both: string += f\"{anim.name} {anim.order} | \" print(string)", "data = self.head.data self.head = self.head.next_node return data def peak(self):", "return string + \"END\" def is_empty(self) -> bool: if self.head", "self.cats.head.data.peek_order(): return self.dequeCat() else: return self.dequeDog() def print_cats(self) -> str:", "def dequeDog(self) -> Dog: return self.dogs.remove() def dequeCat(self) -> Cat:", "super().__init__(name) class AnimalQueue: def __init__(self) -> None: self.dogs = LinkedList()", "python3 from typing import Any, Union class Animal: def __init__(self,", "string + \"END\" def is_empty(self) -> bool: if self.head is", "elif self.cats.is_empty(): return self.dequeDog() if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return self.dequeCat()", "elif isinstance(animal, Cat): self.cats.insert(animal) def dequeAny(self) -> Union[Dog, Cat]: if", "= None self.tail = None def __str__(self) -> str: current", "else: data = self.head.data self.head = self.head.next_node return data def", "dequeCat(self) -> Cat: return self.cats.remove() def main(): q = AnimalQueue()", "self.cats.is_empty(): return self.dequeDog() if self.dogs.head.data.peek_order() > self.cats.head.data.peek_order(): return self.dequeCat() else:", "for anim in both: string += f\"{anim.name} {anim.order} | \"", "self.dogs.insert(animal) elif isinstance(animal, Cat): self.cats.insert(animal) def dequeAny(self) -> Union[Dog, Cat]:", "self.head = Node(item) self.tail = self.head else: new_node = Node(item)" ]
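The fragments in the list above assemble into the classic two-queue animal-shelter structure: dogs and cats wait in separate FIFO queues, each animal is stamped with a global arrival order on enqueue, and dequeAny returns whichever species has waited longest. A condensed sketch of that design follows, using collections.deque in place of the hand-rolled singly linked list seen in the fragments; the snake_case method names are mine, the logic is the fragments'.

from collections import deque

class Animal:
    def __init__(self, name: str) -> None:
        self.name = name
        self.order = -1  # stamped by the queue on enqueue

class Dog(Animal):
    pass

class Cat(Animal):
    pass

class AnimalQueue:
    def __init__(self) -> None:
        self.dogs = deque()
        self.cats = deque()
        self.order = 0  # global arrival counter shared by both queues

    def enqueue(self, animal: Animal) -> None:
        if not isinstance(animal, (Dog, Cat)):
            raise TypeError('expected Dog or Cat')  # mirrors the fragments' type check
        animal.order = self.order
        self.order += 1
        (self.dogs if isinstance(animal, Dog) else self.cats).append(animal)

    def dequeue_dog(self) -> Dog:
        return self.dogs.popleft()

    def dequeue_cat(self) -> Cat:
        return self.cats.popleft()

    def dequeue_any(self) -> Animal:
        # If both queues are empty this raises IndexError. Note the
        # fragments' LinkedList.remove has a bug on that path:
        # raise ("Empty LinkedList!") raises a TypeError, because a plain
        # string is not an exception.
        if not self.dogs:
            return self.dequeue_cat()
        if not self.cats:
            return self.dequeue_dog()
        # The smaller order stamp arrived first.
        if self.dogs[0].order < self.cats[0].order:
            return self.dequeue_dog()
        return self.dequeue_cat()

Enqueuing Dog("d1") then Cat("c1") and calling dequeue_any() returns d1, whose order stamp is lower; this matches the peek_order comparison in the fragments.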
[ "Any datum kwargs that should go to all children. \"\"\"", "cam __all__ = ['DetectorBase', 'AreaDetector', 'AdscDetector', 'Andor3Detector', 'AndorDetector', 'BrukerDetector', 'DexelaDetector',", "for the hardware-specific classes that follow. Note that Plugin also", "_html_docs = ['EVTDoc.html'] cam = C(cam.EmergentVisionDetectorCam, 'cam1:') class EigerDetector(DetectorBase): _html_docs", "C(cam.AdscDetectorCam, 'cam1:') class AndorDetector(DetectorBase): _html_docs = ['andorDoc.html'] cam = C(cam.AndorDetectorCam,", "C(cam.FirewireWinDetectorCam, 'cam1:') class GreatEyesDetector(DetectorBase): _html_docs = [] # the documentation", "'cam1:') class AdscDetector(DetectorBase): _html_docs = ['adscDoc.html'] cam = C(cam.AdscDetectorCam, 'cam1:')", "\"\"\" _default_configuration_attrs = (ADBase._default_configuration_attrs + ('cam', )) def generate_datum(self, key,", "'GreatEyesDetector', 'LightFieldDetector', 'Mar345Detector', 'MarCCDDetector', 'PSLDetector', 'PerkinElmerDetector', 'PICamDetector', 'PilatusDetector', 'PixiradDetector', 'PointGreyDetector',", "'generate_datum')] for p in file_plugins: if p.enable.get(): p.generate_datum(key, timestamp, datum_kwargs)", "'MarCCDDetector', 'PSLDetector', 'PerkinElmerDetector', 'PICamDetector', 'PilatusDetector', 'PixiradDetector', 'PointGreyDetector', 'ProsilicaDetector', 'PvcamDetector', 'RoperDetector',", "that follow. Note that Plugin also inherits from ADBase. This", "the signature :: def generate_datum(key: str, timestamp: float, datum_kwargs: dict):", "timestamp, {}) dispatch.__doc__ = generate_datum.__doc__ def make_data_key(self): source = 'PV:{}'.format(self.prefix)", "= ['pvcamDoc.html'] cam = C(cam.PvcamDetectorCam, 'cam1:') class RoperDetector(DetectorBase): _html_docs =", "'dark', or 'gain8'. It in turn calls ``generate_datum`` on all", "is not public cam = C(cam.GreatEyesDetectorCam, 'cam1:') class LightFieldDetector(DetectorBase): _html_docs", "str, timestamp: float, datum_kwargs: dict): ... Parameters ---------- key :", "# This shape is expected to match arr.shape for the", "class PixiradDetector(DetectorBase): _html_docs = ['PixiradDoc.html'] cam = C(cam.PixiradDetectorCam, 'cam1:') class", "by the plugins. 
\"\"\" _default_configuration_attrs = (ADBase._default_configuration_attrs + ('cam', ))", "SimDetector(DetectorBase): _html_docs = ['simDetectorDoc.html'] cam = C(cam.SimDetectorCam, 'cam1:') class AdscDetector(DetectorBase):", "file_plugins = [s for s in self._signals.values() if hasattr(s, 'generate_datum')]", "if p.enable.get(): p.generate_datum(key, timestamp, datum_kwargs) def dispatch(self, key, timestamp): warnings.warn(", "key, timestamp): warnings.warn( \".dispatch is deprecated, use .generate_datum instead\", stacklevel=2", "C(cam.FirewireLinDetectorCam, 'cam1:') class FirewireWinDetector(DetectorBase): _html_docs = ['FirewireWinDoc.html'] cam = C(cam.FirewireWinDetectorCam,", "UVCDetector(DetectorBase): _html_docs = ['UVCDoc.html'] cam = C(cam.UVCDetectorCam, 'cam1:') class Xspress3Detector(DetectorBase):", "PilatusDetector(DetectorBase): _html_docs = ['pilatusDoc.html'] cam = C(cam.PilatusDetectorCam, 'cam1:') class PixiradDetector(DetectorBase):", "_html_docs = ['BrukerDoc.html'] cam = C(cam.BrukerDetectorCam, 'cam1:') class DexelaDetector(DetectorBase): _html_docs", "C(cam.PerkinElmerDetectorCam, 'cam1:') class PSLDetector(DetectorBase): _html_docs = ['PSLDoc.html'] cam = C(cam.PSLDetectorCam,", "C(cam.UVCDetectorCam, 'cam1:') class Xspress3Detector(DetectorBase): _html_docs = ['Xspress3Doc.html'] cam = C(cam.Xspress3DetectorCam,", "= ['BrukerDoc.html'] cam = C(cam.BrukerDetectorCam, 'cam1:') class DexelaDetector(DetectorBase): _html_docs =", "'Mar345Detector', 'MarCCDDetector', 'PSLDetector', 'PerkinElmerDetector', 'PICamDetector', 'PilatusDetector', 'PixiradDetector', 'PointGreyDetector', 'ProsilicaDetector', 'PvcamDetector',", "'cam1:') class FirewireWinDetector(DetectorBase): _html_docs = ['FirewireWinDoc.html'] cam = C(cam.FirewireWinDetectorCam, 'cam1:')", "ADComponent as C) from . import cam __all__ = ['DetectorBase',", "def collect_asset_docs(self): file_plugins = [s for s in self._signals.values() if", "'AndorDetector', 'BrukerDetector', 'DexelaDetector', 'EmergentVisionDetector', 'EigerDetector', 'FirewireLinDetector', 'FirewireWinDetector', 'GreatEyesDetector', 'LightFieldDetector', 'Mar345Detector',", "= ['EigerDoc.html'] cam = C(cam.EigerDetectorCam, 'cam1:') class FirewireLinDetector(DetectorBase): _html_docs =", "should go to all children. \"\"\" if datum_kwargs is None:", "'PV:{}'.format(self.prefix) # This shape is expected to match arr.shape for", "warnings from .base import (ADBase, ADComponent as C) from .", "ProsilicaDetector(DetectorBase): _html_docs = ['prosilicaDoc.html'] cam = C(cam.ProsilicaDetectorCam, 'cam1:') class PvcamDetector(DetectorBase):", "'cam1:') class ProsilicaDetector(DetectorBase): _html_docs = ['prosilicaDoc.html'] cam = C(cam.ProsilicaDetectorCam, 'cam1:')", "_html_docs = ['URLDoc.html'] cam = C(cam.URLDetectorCam, 'cam1:') class UVCDetector(DetectorBase): _html_docs", "= ['EVTDoc.html'] cam = C(cam.EmergentVisionDetectorCam, 'cam1:') class EigerDetector(DetectorBase): _html_docs =", "the hardware-specific classes that follow. Note that Plugin also inherits", "which is a label like 'light', 'dark', or 'gain8'. 
It", "= ['FirewireWinDoc.html'] cam = C(cam.FirewireLinDetectorCam, 'cam1:') class FirewireWinDetector(DetectorBase): _html_docs =", "= ['DexelaDoc.html'] cam = C(cam.DexelaDetectorCam, 'cam1:') class EmergentVisionDetector(DetectorBase): _html_docs =", "must have the signature :: def generate_datum(key: str, timestamp: float,", "C(cam.PointGreyDetectorCam, 'cam1:') class ProsilicaDetector(DetectorBase): _html_docs = ['prosilicaDoc.html'] cam = C(cam.ProsilicaDetectorCam,", "['Mar345Doc.html'] cam = C(cam.Mar345DetectorCam, 'cam1:') class MarCCDDetector(DetectorBase): _html_docs = ['MarCCDDoc.html']", "datum_kwargs) def dispatch(self, key, timestamp): warnings.warn( \".dispatch is deprecated, use", "acquisition is started, this method is called with a key", "the datum that should be generated timestamp : float The", "from ADBase. This adds some AD-specific methods that are not", "'cam1:') class RoperDetector(DetectorBase): _html_docs = ['RoperDoc.html'] cam = C(cam.RoperDetectorCam, 'cam1:')", "that are not shared by the plugins. \"\"\" _default_configuration_attrs =", "have that method. File plugins are identified by searching for", "hasattr(s, 'generate_datum')] for p in file_plugins: if p.enable.get(): p.generate_datum(key, timestamp,", "that must have the signature :: def generate_datum(key: str, timestamp:", "'RoperDetector', 'SimDetector', 'URLDetector', 'UVCDetector', 'Xspress3Detector' ] class DetectorBase(ADBase): \"\"\" The", "= ['andorDoc.html'] cam = C(cam.AndorDetectorCam, 'cam1:') class Andor3Detector(DetectorBase): _html_docs =", "class FirewireLinDetector(DetectorBase): _html_docs = ['FirewireWinDoc.html'] cam = C(cam.FirewireLinDetectorCam, 'cam1:') class", "kwargs that should go to all children. \"\"\" if datum_kwargs", "'cam1:') class PilatusDetector(DetectorBase): _html_docs = ['pilatusDoc.html'] cam = C(cam.PilatusDetectorCam, 'cam1:')", "EigerDetector(DetectorBase): _html_docs = ['EigerDoc.html'] cam = C(cam.EigerDetectorCam, 'cam1:') class FirewireLinDetector(DetectorBase):", "all children. \"\"\" if datum_kwargs is None: datum_kwargs = {}", "the plugins that have that method. File plugins are identified", "label like 'light', 'dark', or 'gain8'. 
It in turn calls", "_html_docs = ['PerkinElmerDoc.html'] cam = C(cam.PerkinElmerDetectorCam, 'cam1:') class PSLDetector(DetectorBase): _html_docs", "cam = C(cam.DexelaDetectorCam, 'cam1:') class EmergentVisionDetector(DetectorBase): _html_docs = ['EVTDoc.html'] cam", "not public cam = C(cam.GreatEyesDetectorCam, 'cam1:') class LightFieldDetector(DetectorBase): _html_docs =", "'EigerDetector', 'FirewireLinDetector', 'FirewireWinDetector', 'GreatEyesDetector', 'LightFieldDetector', 'Mar345Detector', 'MarCCDDetector', 'PSLDetector', 'PerkinElmerDetector', 'PICamDetector',", "= ['prosilicaDoc.html'] cam = C(cam.ProsilicaDetectorCam, 'cam1:') class PvcamDetector(DetectorBase): _html_docs =", "timestamp, datum_kwargs) def dispatch(self, key, timestamp): warnings.warn( \".dispatch is deprecated,", "file_plugins = [s for s in self._signals.values() if hasattr(s, 'collect_asset_docs')]", "AreaDetector(DetectorBase): cam = C(cam.AreaDetectorCam, 'cam1:') class SimDetector(DetectorBase): _html_docs = ['simDetectorDoc.html']", "This shape is expected to match arr.shape for the array.", "'cam1:') class UVCDetector(DetectorBase): _html_docs = ['UVCDoc.html'] cam = C(cam.UVCDetectorCam, 'cam1:')", "['MarCCDDoc.html'] cam = C(cam.MarCCDDetectorCam, 'cam1:') class PerkinElmerDetector(DetectorBase): _html_docs = ['PerkinElmerDoc.html']", "FirewireLinDetector(DetectorBase): _html_docs = ['FirewireWinDoc.html'] cam = C(cam.FirewireLinDetectorCam, 'cam1:') class FirewireWinDetector(DetectorBase):", "should be generated timestamp : float The time of the", "datum_kwargs = {} file_plugins = [s for s in self._signals.values()", "['PerkinElmerDoc.html'] cam = C(cam.PerkinElmerDetectorCam, 'cam1:') class PSLDetector(DetectorBase): _html_docs = ['PSLDoc.html']", "plugins that have that method. File plugins are identified by", "class AreaDetector(DetectorBase): cam = C(cam.AreaDetectorCam, 'cam1:') class SimDetector(DetectorBase): _html_docs =", "'AreaDetector', 'AdscDetector', 'Andor3Detector', 'AndorDetector', 'BrukerDetector', 'DexelaDetector', 'EmergentVisionDetector', 'EigerDetector', 'FirewireLinDetector', 'FirewireWinDetector',", "plugins are identified by searching for a :meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum` method that", "timestamp: float, datum_kwargs: dict): ... Parameters ---------- key : str", "... Parameters ---------- key : str The label for the", "new acquisition is started, this method is called with a", "in file_plugins: yield from p.collect_asset_docs() class AreaDetector(DetectorBase): cam = C(cam.AreaDetectorCam,", "public cam = C(cam.GreatEyesDetectorCam, 'cam1:') class LightFieldDetector(DetectorBase): _html_docs = ['LightFieldDoc.html']", ": Dict[str, Any], optional Any datum kwargs that should go", "detector abstractions .. _areaDetector: https://areadetector.github.io/master/index.html ''' import warnings from .base", "timestamp : float The time of the trigger datum_kwargs :", "\".dispatch is deprecated, use .generate_datum instead\", stacklevel=2 ) return self.generate_datum(key,", "# vi: ts=4 sw=4 '''AreaDetector Devices `areaDetector`_ detector abstractions ..", "MarCCDDetector(DetectorBase): _html_docs = ['MarCCDDoc.html'] cam = C(cam.MarCCDDetectorCam, 'cam1:') class PerkinElmerDetector(DetectorBase):", "Devices `areaDetector`_ detector abstractions .. 
_areaDetector: https://areadetector.github.io/master/index.html ''' import warnings", "_default_configuration_attrs = (ADBase._default_configuration_attrs + ('cam', )) def generate_datum(self, key, timestamp,", "signature :: def generate_datum(key: str, timestamp: float, datum_kwargs: dict): ...", "'cam1:') class DexelaDetector(DetectorBase): _html_docs = ['DexelaDoc.html'] cam = C(cam.DexelaDetectorCam, 'cam1:')", "= C(cam.FirewireWinDetectorCam, 'cam1:') class GreatEyesDetector(DetectorBase): _html_docs = [] # the", "datum kwargs that should go to all children. \"\"\" if", "= C(cam.AndorDetectorCam, 'cam1:') class Andor3Detector(DetectorBase): _html_docs = ['andor3Doc.html'] cam =", "= C(cam.BrukerDetectorCam, 'cam1:') class DexelaDetector(DetectorBase): _html_docs = ['DexelaDoc.html'] cam =", "This adds some AD-specific methods that are not shared by", "# the documentation is not public cam = C(cam.GreatEyesDetectorCam, 'cam1:')", "] class DetectorBase(ADBase): \"\"\" The base class for the hardware-specific", "'cam1:') class PointGreyDetector(DetectorBase): _html_docs = ['PointGreyDoc.html'] cam = C(cam.PointGreyDetectorCam, 'cam1:')", "instead\", stacklevel=2 ) return self.generate_datum(key, timestamp, {}) dispatch.__doc__ = generate_datum.__doc__", "PICamDetector(DetectorBase): _html_docs = ['PICamDoc.html'] cam = C(cam.PICamDetectorCam, 'cam1:') class PilatusDetector(DetectorBase):", "def generate_datum(self, key, timestamp, datum_kwargs=None): \"\"\" Notify plugins of acquisition", "{}) dispatch.__doc__ = generate_datum.__doc__ def make_data_key(self): source = 'PV:{}'.format(self.prefix) #", "that have that method. File plugins are identified by searching", "for p in file_plugins: yield from p.collect_asset_docs() class AreaDetector(DetectorBase): cam", "method. File plugins are identified by searching for a :meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum`", "= ['DetectorBase', 'AreaDetector', 'AdscDetector', 'Andor3Detector', 'AndorDetector', 'BrukerDetector', 'DexelaDetector', 'EmergentVisionDetector', 'EigerDetector',", "['adscDoc.html'] cam = C(cam.AdscDetectorCam, 'cam1:') class AndorDetector(DetectorBase): _html_docs = ['andorDoc.html']", "self.generate_datum(key, timestamp, {}) dispatch.__doc__ = generate_datum.__doc__ def make_data_key(self): source =", "dispatch(self, key, timestamp): warnings.warn( \".dispatch is deprecated, use .generate_datum instead\",", "a key which is a label like 'light', 'dark', or", "PixiradDetector(DetectorBase): _html_docs = ['PixiradDoc.html'] cam = C(cam.PixiradDetectorCam, 'cam1:') class PointGreyDetector(DetectorBase):", "source=source, dtype='array', external='FILESTORE:') def collect_asset_docs(self): file_plugins = [s for s", "cam = C(cam.BrukerDetectorCam, 'cam1:') class DexelaDetector(DetectorBase): _html_docs = ['DexelaDoc.html'] cam", "array. shape = (self.cam.num_images.get(), self.cam.array_size.array_size_y.get(), self.cam.array_size.array_size_x.get()) return dict(shape=shape, source=source, dtype='array',", "= C(cam.DexelaDetectorCam, 'cam1:') class EmergentVisionDetector(DetectorBase): _html_docs = ['EVTDoc.html'] cam =", "RoperDetector(DetectorBase): _html_docs = ['RoperDoc.html'] cam = C(cam.RoperDetectorCam, 'cam1:') class URLDetector(DetectorBase):", "to match arr.shape for the array. 
shape = (self.cam.num_images.get(), self.cam.array_size.array_size_y.get(),", "``generate_datum`` on all of the plugins that have that method.", ": float The time of the trigger datum_kwargs : Dict[str,", "for a :meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum` method that must have the signature ::", "'cam1:') class LightFieldDetector(DetectorBase): _html_docs = ['LightFieldDoc.html'] cam = C(cam.LightFieldDetectorCam, 'cam1:')", "DetectorBase(ADBase): \"\"\" The base class for the hardware-specific classes that", "['andor3Doc.html'] cam = C(cam.Andor3DetectorCam, 'cam1:') class BrukerDetector(DetectorBase): _html_docs = ['BrukerDoc.html']", "hardware-specific classes that follow. Note that Plugin also inherits from", "datum that should be generated timestamp : float The time", "{} file_plugins = [s for s in self._signals.values() if hasattr(s,", "---------- key : str The label for the datum that", "_html_docs = ['FirewireWinDoc.html'] cam = C(cam.FirewireWinDetectorCam, 'cam1:') class GreatEyesDetector(DetectorBase): _html_docs", "base class for the hardware-specific classes that follow. Note that", "documentation is not public cam = C(cam.GreatEyesDetectorCam, 'cam1:') class LightFieldDetector(DetectorBase):", "identified by searching for a :meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum` method that must have", ") return self.generate_datum(key, timestamp, {}) dispatch.__doc__ = generate_datum.__doc__ def make_data_key(self):", "_areaDetector: https://areadetector.github.io/master/index.html ''' import warnings from .base import (ADBase, ADComponent", "cam = C(cam.PointGreyDetectorCam, 'cam1:') class ProsilicaDetector(DetectorBase): _html_docs = ['prosilicaDoc.html'] cam", "methods that are not shared by the plugins. \"\"\" _default_configuration_attrs", "= {} file_plugins = [s for s in self._signals.values() if", "+ ('cam', )) def generate_datum(self, key, timestamp, datum_kwargs=None): \"\"\" Notify", "= C(cam.AdscDetectorCam, 'cam1:') class AndorDetector(DetectorBase): _html_docs = ['andorDoc.html'] cam =", "method is called with a key which is a label", "[s for s in self._signals.values() if hasattr(s, 'generate_datum')] for p", "the plugins. \"\"\" _default_configuration_attrs = (ADBase._default_configuration_attrs + ('cam', )) def", ")) def generate_datum(self, key, timestamp, datum_kwargs=None): \"\"\" Notify plugins of", "key which is a label like 'light', 'dark', or 'gain8'.", "C(cam.LightFieldDetectorCam, 'cam1:') class Mar345Detector(DetectorBase): _html_docs = ['Mar345Doc.html'] cam = C(cam.Mar345DetectorCam,", "import (ADBase, ADComponent as C) from . import cam __all__", "from . import cam __all__ = ['DetectorBase', 'AreaDetector', 'AdscDetector', 'Andor3Detector',", "= C(cam.FirewireLinDetectorCam, 'cam1:') class FirewireWinDetector(DetectorBase): _html_docs = ['FirewireWinDoc.html'] cam =", "When a new acquisition is started, this method is called", "datum_kwargs: dict): ... Parameters ---------- key : str The label", "['PixiradDoc.html'] cam = C(cam.PixiradDetectorCam, 'cam1:') class PointGreyDetector(DetectorBase): _html_docs = ['PointGreyDoc.html']", "'cam1:') class EigerDetector(DetectorBase): _html_docs = ['EigerDoc.html'] cam = C(cam.EigerDetectorCam, 'cam1:')", "self._signals.values() if hasattr(s, 'generate_datum')] for p in file_plugins: if p.enable.get():", "plugins of acquisition being complete. When a new acquisition is", "expected to match arr.shape for the array. 
shape = (self.cam.num_images.get(),", "C(cam.DexelaDetectorCam, 'cam1:') class EmergentVisionDetector(DetectorBase): _html_docs = ['EVTDoc.html'] cam = C(cam.EmergentVisionDetectorCam,", "class EmergentVisionDetector(DetectorBase): _html_docs = ['EVTDoc.html'] cam = C(cam.EmergentVisionDetectorCam, 'cam1:') class", "PvcamDetector(DetectorBase): _html_docs = ['pvcamDoc.html'] cam = C(cam.PvcamDetectorCam, 'cam1:') class RoperDetector(DetectorBase):", "['DexelaDoc.html'] cam = C(cam.DexelaDetectorCam, 'cam1:') class EmergentVisionDetector(DetectorBase): _html_docs = ['EVTDoc.html']", "'SimDetector', 'URLDetector', 'UVCDetector', 'Xspress3Detector' ] class DetectorBase(ADBase): \"\"\" The base", "import cam __all__ = ['DetectorBase', 'AreaDetector', 'AdscDetector', 'Andor3Detector', 'AndorDetector', 'BrukerDetector',", "= ['LightFieldDoc.html'] cam = C(cam.LightFieldDetectorCam, 'cam1:') class Mar345Detector(DetectorBase): _html_docs =", "cam = C(cam.GreatEyesDetectorCam, 'cam1:') class LightFieldDetector(DetectorBase): _html_docs = ['LightFieldDoc.html'] cam", "a :meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum` method that must have the signature :: def", "'cam1:') class MarCCDDetector(DetectorBase): _html_docs = ['MarCCDDoc.html'] cam = C(cam.MarCCDDetectorCam, 'cam1:')", "class BrukerDetector(DetectorBase): _html_docs = ['BrukerDoc.html'] cam = C(cam.BrukerDetectorCam, 'cam1:') class", "DexelaDetector(DetectorBase): _html_docs = ['DexelaDoc.html'] cam = C(cam.DexelaDetectorCam, 'cam1:') class EmergentVisionDetector(DetectorBase):", ":: def generate_datum(key: str, timestamp: float, datum_kwargs: dict): ... Parameters", "The time of the trigger datum_kwargs : Dict[str, Any], optional", "follow. Note that Plugin also inherits from ADBase. This adds", "'LightFieldDetector', 'Mar345Detector', 'MarCCDDetector', 'PSLDetector', 'PerkinElmerDetector', 'PICamDetector', 'PilatusDetector', 'PixiradDetector', 'PointGreyDetector', 'ProsilicaDetector',", "cam = C(cam.UVCDetectorCam, 'cam1:') class Xspress3Detector(DetectorBase): _html_docs = ['Xspress3Doc.html'] cam", "this method is called with a key which is a", "like 'light', 'dark', or 'gain8'. It in turn calls ``generate_datum``", "C(cam.URLDetectorCam, 'cam1:') class UVCDetector(DetectorBase): _html_docs = ['UVCDoc.html'] cam = C(cam.UVCDetectorCam,", "('cam', )) def generate_datum(self, key, timestamp, datum_kwargs=None): \"\"\" Notify plugins", "called with a key which is a label like 'light',", "_html_docs = ['Mar345Doc.html'] cam = C(cam.Mar345DetectorCam, 'cam1:') class MarCCDDetector(DetectorBase): _html_docs", "['simDetectorDoc.html'] cam = C(cam.SimDetectorCam, 'cam1:') class AdscDetector(DetectorBase): _html_docs = ['adscDoc.html']", "cam = C(cam.SimDetectorCam, 'cam1:') class AdscDetector(DetectorBase): _html_docs = ['adscDoc.html'] cam", "cam = C(cam.AreaDetectorCam, 'cam1:') class SimDetector(DetectorBase): _html_docs = ['simDetectorDoc.html'] cam", "timestamp, datum_kwargs=None): \"\"\" Notify plugins of acquisition being complete. When", "C(cam.AndorDetectorCam, 'cam1:') class Andor3Detector(DetectorBase): _html_docs = ['andor3Doc.html'] cam = C(cam.Andor3DetectorCam,", "'cam1:') class PixiradDetector(DetectorBase): _html_docs = ['PixiradDoc.html'] cam = C(cam.PixiradDetectorCam, 'cam1:')", "class PointGreyDetector(DetectorBase): _html_docs = ['PointGreyDoc.html'] cam = C(cam.PointGreyDetectorCam, 'cam1:') class", "str The label for the datum that should be generated", "dict): ... 
Parameters ---------- key : str The label for", "['andorDoc.html'] cam = C(cam.AndorDetectorCam, 'cam1:') class Andor3Detector(DetectorBase): _html_docs = ['andor3Doc.html']", "Note that Plugin also inherits from ADBase. This adds some", "as C) from . import cam __all__ = ['DetectorBase', 'AreaDetector',", "PerkinElmerDetector(DetectorBase): _html_docs = ['PerkinElmerDoc.html'] cam = C(cam.PerkinElmerDetectorCam, 'cam1:') class PSLDetector(DetectorBase):", "The label for the datum that should be generated timestamp", "the documentation is not public cam = C(cam.GreatEyesDetectorCam, 'cam1:') class", "[s for s in self._signals.values() if hasattr(s, 'collect_asset_docs')] for p", "= C(cam.PointGreyDetectorCam, 'cam1:') class ProsilicaDetector(DetectorBase): _html_docs = ['prosilicaDoc.html'] cam =", "the trigger datum_kwargs : Dict[str, Any], optional Any datum kwargs", "class PilatusDetector(DetectorBase): _html_docs = ['pilatusDoc.html'] cam = C(cam.PilatusDetectorCam, 'cam1:') class", "from p.collect_asset_docs() class AreaDetector(DetectorBase): cam = C(cam.AreaDetectorCam, 'cam1:') class SimDetector(DetectorBase):", "_html_docs = ['PointGreyDoc.html'] cam = C(cam.PointGreyDetectorCam, 'cam1:') class ProsilicaDetector(DetectorBase): _html_docs", "Notify plugins of acquisition being complete. When a new acquisition", "C(cam.PSLDetectorCam, 'cam1:') class PICamDetector(DetectorBase): _html_docs = ['PICamDoc.html'] cam = C(cam.PICamDetectorCam,", "is expected to match arr.shape for the array. shape =", "'cam1:') class FirewireLinDetector(DetectorBase): _html_docs = ['FirewireWinDoc.html'] cam = C(cam.FirewireLinDetectorCam, 'cam1:')", "for the datum that should be generated timestamp : float", "on all of the plugins that have that method. File", "= [] # the documentation is not public cam =", "= generate_datum.__doc__ def make_data_key(self): source = 'PV:{}'.format(self.prefix) # This shape", "_html_docs = ['EigerDoc.html'] cam = C(cam.EigerDetectorCam, 'cam1:') class FirewireLinDetector(DetectorBase): _html_docs", "deprecated, use .generate_datum instead\", stacklevel=2 ) return self.generate_datum(key, timestamp, {})", "= C(cam.PvcamDetectorCam, 'cam1:') class RoperDetector(DetectorBase): _html_docs = ['RoperDoc.html'] cam =", "use .generate_datum instead\", stacklevel=2 ) return self.generate_datum(key, timestamp, {}) dispatch.__doc__", "'cam1:') class Andor3Detector(DetectorBase): _html_docs = ['andor3Doc.html'] cam = C(cam.Andor3DetectorCam, 'cam1:')", "plugins. \"\"\" _default_configuration_attrs = (ADBase._default_configuration_attrs + ('cam', )) def generate_datum(self,", "C(cam.PilatusDetectorCam, 'cam1:') class PixiradDetector(DetectorBase): _html_docs = ['PixiradDoc.html'] cam = C(cam.PixiradDetectorCam,", "['DetectorBase', 'AreaDetector', 'AdscDetector', 'Andor3Detector', 'AndorDetector', 'BrukerDetector', 'DexelaDetector', 'EmergentVisionDetector', 'EigerDetector', 'FirewireLinDetector',", "source = 'PV:{}'.format(self.prefix) # This shape is expected to match", "cam = C(cam.PSLDetectorCam, 'cam1:') class PICamDetector(DetectorBase): _html_docs = ['PICamDoc.html'] cam", "generate_datum(key: str, timestamp: float, datum_kwargs: dict): ... 
# vi: ts=4 sw=4
'''AreaDetector Devices

`areaDetector`_ detector abstractions

.. _areaDetector: https://areadetector.github.io/master/index.html
'''
import warnings

from .base import (ADBase, ADComponent as C)
from . import cam


__all__ = ['DetectorBase',
           'AreaDetector',
           'AdscDetector',
           'Andor3Detector',
           'AndorDetector',
           'BrukerDetector',
           'DexelaDetector',
           'EmergentVisionDetector',
           'EigerDetector',
           'FirewireLinDetector',
           'FirewireWinDetector',
           'GreatEyesDetector',
           'LightFieldDetector',
           'Mar345Detector',
           'MarCCDDetector',
           'PSLDetector',
           'PerkinElmerDetector',
           'PICamDetector',
           'PilatusDetector',
           'PixiradDetector',
           'PointGreyDetector',
           'ProsilicaDetector',
           'PvcamDetector',
           'RoperDetector',
           'SimDetector',
           'URLDetector',
           'UVCDetector',
           'Xspress3Detector'
           ]


class DetectorBase(ADBase):
    """
    The base class for the hardware-specific classes that follow.

    Note that Plugin also inherits from ADBase.
    This adds some AD-specific methods that are not shared by the plugins.
    """
    _default_configuration_attrs = (ADBase._default_configuration_attrs +
                                    ('cam', ))

    def generate_datum(self, key, timestamp, datum_kwargs=None):
        """
        Notify plugins of acquisition being complete.

        When a new acquisition is started, this method is called with a
        key which is a label like 'light', 'dark', or 'gain8'.

        It in turn calls ``generate_datum`` on all of the plugins that
        have that method.

        File plugins are identified by searching for a
        :meth:`~ophyd.areadetector.filestore_mixins.FileStoreBase.generate_datum`
        method that must have the signature ::

           def generate_datum(key: str, timestamp: float, datum_kwargs: dict):
               ...

        Parameters
        ----------
        key : str
            The label for the datum that should be generated
        timestamp : float
            The time of the trigger
        datum_kwargs : Dict[str, Any], optional
            Any datum kwargs that should go to all children.
        """
        if datum_kwargs is None:
            datum_kwargs = {}
        file_plugins = [s for s in self._signals.values()
                        if hasattr(s, 'generate_datum')]
        for p in file_plugins:
            if p.enable.get():
                p.generate_datum(key, timestamp, datum_kwargs)

    def dispatch(self, key, timestamp):
        warnings.warn(".dispatch is deprecated, use .generate_datum instead",
                      stacklevel=2)
        return self.generate_datum(key, timestamp, {})

    dispatch.__doc__ = generate_datum.__doc__

    def make_data_key(self):
        source = 'PV:{}'.format(self.prefix)
        # This shape is expected to match arr.shape for the array.
        shape = (self.cam.num_images.get(),
                 self.cam.array_size.array_size_y.get(),
                 self.cam.array_size.array_size_x.get())
        return dict(shape=shape, source=source, dtype='array',
                    external='FILESTORE:')

    def collect_asset_docs(self):
        file_plugins = [s for s in self._signals.values()
                        if hasattr(s, 'collect_asset_docs')]
        for p in file_plugins:
            yield from p.collect_asset_docs()


class AreaDetector(DetectorBase):
    cam = C(cam.AreaDetectorCam, 'cam1:')


class SimDetector(DetectorBase):
    _html_docs = ['simDetectorDoc.html']
    cam = C(cam.SimDetectorCam, 'cam1:')


class AdscDetector(DetectorBase):
    _html_docs = ['adscDoc.html']
    cam = C(cam.AdscDetectorCam, 'cam1:')


class AndorDetector(DetectorBase):
    _html_docs = ['andorDoc.html']
    cam = C(cam.AndorDetectorCam, 'cam1:')


class Andor3Detector(DetectorBase):
    _html_docs = ['andor3Doc.html']
    cam = C(cam.Andor3DetectorCam, 'cam1:')


class BrukerDetector(DetectorBase):
    _html_docs = ['BrukerDoc.html']
    cam = C(cam.BrukerDetectorCam, 'cam1:')


class DexelaDetector(DetectorBase):
    _html_docs = ['DexelaDoc.html']
    cam = C(cam.DexelaDetectorCam, 'cam1:')


class EmergentVisionDetector(DetectorBase):
    _html_docs = ['EVTDoc.html']
    cam = C(cam.EmergentVisionDetectorCam, 'cam1:')


class EigerDetector(DetectorBase):
    _html_docs = ['EigerDoc.html']
    cam = C(cam.EigerDetectorCam, 'cam1:')


class FirewireLinDetector(DetectorBase):
    _html_docs = ['FirewireWinDoc.html']
    cam = C(cam.FirewireLinDetectorCam, 'cam1:')


class FirewireWinDetector(DetectorBase):
    _html_docs = ['FirewireWinDoc.html']
    cam = C(cam.FirewireWinDetectorCam, 'cam1:')


class GreatEyesDetector(DetectorBase):
    _html_docs = []  # the documentation is not public
    cam = C(cam.GreatEyesDetectorCam, 'cam1:')


class LightFieldDetector(DetectorBase):
    _html_docs = ['LightFieldDoc.html']
    cam = C(cam.LightFieldDetectorCam, 'cam1:')


class Mar345Detector(DetectorBase):
    _html_docs = ['Mar345Doc.html']
    cam = C(cam.Mar345DetectorCam, 'cam1:')


class MarCCDDetector(DetectorBase):
    _html_docs = ['MarCCDDoc.html']
    cam = C(cam.MarCCDDetectorCam, 'cam1:')


class PerkinElmerDetector(DetectorBase):
    _html_docs = ['PerkinElmerDoc.html']
    cam = C(cam.PerkinElmerDetectorCam, 'cam1:')


class PSLDetector(DetectorBase):
    _html_docs = ['PSLDoc.html']
    cam = C(cam.PSLDetectorCam, 'cam1:')


class PICamDetector(DetectorBase):
    _html_docs = ['PICamDoc.html']
    cam = C(cam.PICamDetectorCam, 'cam1:')


class PilatusDetector(DetectorBase):
    _html_docs = ['pilatusDoc.html']
    cam = C(cam.PilatusDetectorCam, 'cam1:')


class PixiradDetector(DetectorBase):
    _html_docs = ['PixiradDoc.html']
    cam = C(cam.PixiradDetectorCam, 'cam1:')


class PointGreyDetector(DetectorBase):
    _html_docs = ['PointGreyDoc.html']
    cam = C(cam.PointGreyDetectorCam, 'cam1:')


class ProsilicaDetector(DetectorBase):
    _html_docs = ['prosilicaDoc.html']
    cam = C(cam.ProsilicaDetectorCam, 'cam1:')


class PvcamDetector(DetectorBase):
    _html_docs = ['pvcamDoc.html']
    cam = C(cam.PvcamDetectorCam, 'cam1:')


class RoperDetector(DetectorBase):
    _html_docs = ['RoperDoc.html']
    cam = C(cam.RoperDetectorCam, 'cam1:')


class URLDetector(DetectorBase):
    _html_docs = ['URLDoc.html']
    cam = C(cam.URLDetectorCam, 'cam1:')


class UVCDetector(DetectorBase):
    _html_docs = ['UVCDoc.html']
    cam = C(cam.UVCDetectorCam, 'cam1:')


class Xspress3Detector(DetectorBase):
    _html_docs = ['Xspress3Doc.html']
    cam = C(cam.Xspress3DetectorCam, 'det1:')
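A minimal usage sketch, not part of the module above: it assumes the module is importable as ophyd.areadetector.detectors and uses an illustrative EPICS prefix '13SIM1:'; nothing here talks to real hardware until an IOC answers at that prefix.

from ophyd.areadetector.detectors import SimDetector

# Prefix and device name are illustrative assumptions.
det = SimDetector('13SIM1:', name='det')

# generate_datum fans out to every attached file plugin that is enabled;
# with no file plugins attached (as in this bare example) it is a no-op.
det.generate_datum('light', timestamp=0.0, datum_kwargs={})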
[ "(m): ')) area(f'A área do seu terreno {l}X{c} é de", "(m): ')) c = float(input('Comprimento (m): ')) area(f'A área do", "def area(msg):#declaracao da funcao com o parametro msg print(msg)#aqui msg", "funcao com o parametro msg print(msg)#aqui msg e a area", "20) l = float(input('Largura (m): ')) c = float(input('Comprimento (m):", "da funcao com o parametro msg print(msg)#aqui msg e a", "print('Controle de Terrenos') print('-' * 20) l = float(input('Largura (m):", "= float(input('Comprimento (m): ')) area(f'A área do seu terreno {l}X{c}", "Terrenos') print('-' * 20) l = float(input('Largura (m): ')) c", "print('-' * 20) l = float(input('Largura (m): ')) c =", "= float(input('Largura (m): ')) c = float(input('Comprimento (m): ')) area(f'A", "area(msg):#declaracao da funcao com o parametro msg print(msg)#aqui msg e", "o parametro msg print(msg)#aqui msg e a area print('Controle de", "de Terrenos') print('-' * 20) l = float(input('Largura (m): '))", "msg print(msg)#aqui msg e a area print('Controle de Terrenos') print('-'", "e a area print('Controle de Terrenos') print('-' * 20) l", "')) area(f'A área do seu terreno {l}X{c} é de {l*c}m².')", "float(input('Comprimento (m): ')) area(f'A área do seu terreno {l}X{c} é", "msg e a area print('Controle de Terrenos') print('-' * 20)", "area print('Controle de Terrenos') print('-' * 20) l = float(input('Largura", "com o parametro msg print(msg)#aqui msg e a area print('Controle", "')) c = float(input('Comprimento (m): ')) area(f'A área do seu", "parametro msg print(msg)#aqui msg e a area print('Controle de Terrenos')", "c = float(input('Comprimento (m): ')) area(f'A área do seu terreno", "* 20) l = float(input('Largura (m): ')) c = float(input('Comprimento", "l = float(input('Largura (m): ')) c = float(input('Comprimento (m): '))", "float(input('Largura (m): ')) c = float(input('Comprimento (m): ')) area(f'A área", "print(msg)#aqui msg e a area print('Controle de Terrenos') print('-' *", "a area print('Controle de Terrenos') print('-' * 20) l =" ]
[ "html template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path, 'templates', filename)", "Tuple with endpoints where access must not be checked. \"\"\"", "response = authorizer.validate() return response def authorize_endpoint(function): @wraps(function) def authorized_function(*args,", "def logout(): _, response = authorizer.clean_cookie() return response def auth():", "\"\"\"Loads the login html template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__)) path =", "app = app.server login_template = load_template('login.html') app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login',", "for endpoint, function in app.view_functions.items(): if endpoint not in excluded_resources_endpoints:", "isinstance(app, Dash): app = app.server login_template = load_template('login.html') app.add_url_rule('/auth', '/auth',", "to a flask app. Decorates other endpoints to grant access.", "auth) app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout', '/logout', logout) for endpoint, function", "endpoints to a flask app. Decorates other endpoints to grant", "DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets cookies on login * Rejects unauthorized", "if ok: return make_response(redirect('/'), 307) return render_template_string(login_template) def logout(): _,", "def authorize_endpoint(function): @wraps(function) def authorized_function(*args, **kwargs): ok, response = authorizer.validate()", "response return authorized_function if isinstance(app, Dash): app = app.server login_template", "in app.view_functions.items(): if endpoint not in excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function)", "path = join_path(pyfile_path, 'templates', filename) with open(path, 'r') as f:", "wraps from os.path import join as join_path from dash import", "Method: GET * /logout * Method: GET * Erases cookies", "or header authentication * Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets", "endpoints are: * /login * Method: GET * /logout *", "GET * Validates cookies if present or header authentication *", "The flask or dash application excluded_resources_endpoints: tuple(str) Tuple with endpoints", "Decorates other endpoints to grant access. 
The endpoints are: *", "_ = authorizer.validate() if ok: return make_response(redirect('/'), 307) return render_template_string(login_template)", "return make_response(redirect('/'), 307) return render_template_string(login_template) def logout(): _, response =", "* Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets cookies on login", "def authorized_function(*args, **kwargs): ok, response = authorizer.validate() if ok: return", "app.view_functions.items(): if endpoint not in excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function) def", "the login html template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path,", "from os.path import join as join_path from dash import Dash", "response def auth(): _, response = authorizer.validate() return response def", "make_response(redirect('/'), 307) return render_template_string(login_template) def logout(): _, response = authorizer.clean_cookie()", "make_response, render_template_string, redirect excluded_resources_endpoints = ( 'static', '_dash_assets.static', '/_favicon.ico', '/login',", "ok, _ = authorizer.validate() if ok: return make_response(redirect('/'), 307) return", "app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout', '/logout', logout) for", "template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path, 'templates', filename) with", "cookies on login * Rejects unauthorized users Parameters ---------- app:", "= authorizer.validate() if ok: return make_response(redirect('/'), 307) return render_template_string(login_template) def", "'_dash_assets.static', '/_favicon.ico', '/login', '/logout', '/_user', '/auth') def add_routes(app, authorizer): \"\"\"Adds", "authorizer.validate() return response def authorize_endpoint(function): @wraps(function) def authorized_function(*args, **kwargs): ok,", "\"\"\" def login(): ok, _ = authorizer.validate() if ok: return", "join_path from dash import Dash from flask import make_response, render_template_string,", "@wraps(function) def authorized_function(*args, **kwargs): ok, response = authorizer.validate() if ok:", "auth(): _, response = authorizer.validate() return response def authorize_endpoint(function): @wraps(function)", "response = authorizer.clean_cookie() return response def auth(): _, response =", "grant access. 
The endpoints are: * /login * Method: GET", "**kwargs) return response return authorized_function if isinstance(app, Dash): app =", "* /login * Method: GET * /logout * Method: GET", "'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout', '/_user', '/auth') def add_routes(app, authorizer):", "cookies * /auth * Method: GET * Validates cookies if", "authorize_endpoint(function): @wraps(function) def authorized_function(*args, **kwargs): ok, response = authorizer.validate() if", "authorized_function(*args, **kwargs): ok, response = authorizer.validate() if ok: return function(*args,", "in excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function) def load_template(filename): \"\"\"Loads the login", "app: flask.Flask or dash.Dash The flask or dash application excluded_resources_endpoints:", "authorizer.validate() if ok: return function(*args, **kwargs) return response return authorized_function", "authorize_endpoint(function) def load_template(filename): \"\"\"Loads the login html template.\"\"\" pyfile_path =", "login_template = load_template('login.html') app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout',", "'/auth') def add_routes(app, authorizer): \"\"\"Adds authentication endpoints to a flask", "return response return authorized_function if isinstance(app, Dash): app = app.server", "'/logout', logout) for endpoint, function in app.view_functions.items(): if endpoint not", "= os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path, 'templates', filename) with open(path, 'r')", "= ( 'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout', '/_user', '/auth') def", "are: * /login * Method: GET * /logout * Method:", "header authentication * Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets cookies", "if ok: return function(*args, **kwargs) return response return authorized_function if", "import wraps from os.path import join as join_path from dash", "'/_favicon.ico', '/login', '/logout', '/_user', '/auth') def add_routes(app, authorizer): \"\"\"Adds authentication", "be checked. \"\"\" def login(): ok, _ = authorizer.validate() if", "**kwargs): ok, response = authorizer.validate() if ok: return function(*args, **kwargs)", "login * Rejects unauthorized users Parameters ---------- app: flask.Flask or", "access must not be checked. \"\"\" def login(): ok, _", "tuple(str) Tuple with endpoints where access must not be checked.", "return render_template_string(login_template) def logout(): _, response = authorizer.clean_cookie() return response", "functools import wraps from os.path import join as join_path from", "checked. \"\"\" def login(): ok, _ = authorizer.validate() if ok:", "'/login', login) app.add_url_rule('/logout', '/logout', logout) for endpoint, function in app.view_functions.items():", "* Erases cookies * /auth * Method: GET * Validates", "Method: GET * Erases cookies * /auth * Method: GET", "function(*args, **kwargs) return response return authorized_function if isinstance(app, Dash): app", "GET * Erases cookies * /auth * Method: GET *", "endpoint, function in app.view_functions.items(): if endpoint not in excluded_resources_endpoints: app.view_functions[endpoint]", "with endpoints where access must not be checked. 
\"\"\" def", "authorizer.validate() if ok: return make_response(redirect('/'), 307) return render_template_string(login_template) def logout():", "= app.server login_template = load_template('login.html') app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login', '/login',", "os from functools import wraps from os.path import join as", "return response def auth(): _, response = authorizer.validate() return response", "* Method: GET * /logout * Method: GET * Erases", "from functools import wraps from os.path import join as join_path", "excluded_resources_endpoints: tuple(str) Tuple with endpoints where access must not be", "excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function) def load_template(filename): \"\"\"Loads the login html", "os.path import join as join_path from dash import Dash from", "'/_user', '/auth') def add_routes(app, authorizer): \"\"\"Adds authentication endpoints to a", "function in app.view_functions.items(): if endpoint not in excluded_resources_endpoints: app.view_functions[endpoint] =", "return authorized_function if isinstance(app, Dash): app = app.server login_template =", "Erases cookies * /auth * Method: GET * Validates cookies", "'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets cookies on login * Rejects", "if endpoint not in excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function) def load_template(filename):", "Sets cookies on login * Rejects unauthorized users Parameters ----------", "on login * Rejects unauthorized users Parameters ---------- app: flask.Flask", "_, response = authorizer.validate() return response def authorize_endpoint(function): @wraps(function) def", "not in excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function) def load_template(filename): \"\"\"Loads the", "return function(*args, **kwargs) return response return authorized_function if isinstance(app, Dash):", "import make_response, render_template_string, redirect excluded_resources_endpoints = ( 'static', '_dash_assets.static', '/_favicon.ico',", "from flask import make_response, render_template_string, redirect excluded_resources_endpoints = ( 'static',", "flask or dash application excluded_resources_endpoints: tuple(str) Tuple with endpoints where", "response = authorizer.validate() if ok: return function(*args, **kwargs) return response", "login html template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path, 'templates',", "Parameters ---------- app: flask.Flask or dash.Dash The flask or dash", "render_template_string, redirect excluded_resources_endpoints = ( 'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout',", "'/auth', auth) app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout', '/logout', logout) for endpoint,", "def login(): ok, _ = authorizer.validate() if ok: return make_response(redirect('/'),", "flask.Flask or dash.Dash The flask or dash application excluded_resources_endpoints: tuple(str)", "endpoints where access must not be checked. 
\"\"\" def login():", "login) app.add_url_rule('/logout', '/logout', logout) for endpoint, function in app.view_functions.items(): if", "= load_template('login.html') app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout', '/logout',", "or dash application excluded_resources_endpoints: tuple(str) Tuple with endpoints where access", "\"\"\"Adds authentication endpoints to a flask app. Decorates other endpoints", "_, response = authorizer.clean_cookie() return response def auth(): _, response", "ok: return function(*args, **kwargs) return response return authorized_function if isinstance(app,", "authorizer.clean_cookie() return response def auth(): _, response = authorizer.validate() return", "application excluded_resources_endpoints: tuple(str) Tuple with endpoints where access must not", "excluded_resources_endpoints = ( 'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout', '/_user', '/auth')", "as join_path from dash import Dash from flask import make_response,", "os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path, 'templates', filename) with open(path, 'r') as", "authorized_function if isinstance(app, Dash): app = app.server login_template = load_template('login.html')", "dash import Dash from flask import make_response, render_template_string, redirect excluded_resources_endpoints", "pyfile_path = os.path.dirname(os.path.abspath(__file__)) path = join_path(pyfile_path, 'templates', filename) with open(path,", "endpoint not in excluded_resources_endpoints: app.view_functions[endpoint] = authorize_endpoint(function) def load_template(filename): \"\"\"Loads", "ok, response = authorizer.validate() if ok: return function(*args, **kwargs) return", "not be checked. \"\"\" def login(): ok, _ = authorizer.validate()", "authentication endpoints to a flask app. Decorates other endpoints to", "load_template(filename): \"\"\"Loads the login html template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__)) path", "= authorize_endpoint(function) def load_template(filename): \"\"\"Loads the login html template.\"\"\" pyfile_path", "endpoints to grant access. The endpoints are: * /login *", "present or header authentication * Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' *", "Validates cookies if present or header authentication * Header: 'Authorization:", "* Sets cookies on login * Rejects unauthorized users Parameters", "Dash): app = app.server login_template = load_template('login.html') app.add_url_rule('/auth', '/auth', auth)", "= join_path(pyfile_path, 'templates', filename) with open(path, 'r') as f: return", "def add_routes(app, authorizer): \"\"\"Adds authentication endpoints to a flask app.", "= authorizer.validate() return response def authorize_endpoint(function): @wraps(function) def authorized_function(*args, **kwargs):", "app.server login_template = load_template('login.html') app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login', '/login', login)", "Dash from flask import make_response, render_template_string, redirect excluded_resources_endpoints = (", "= authorizer.clean_cookie() return response def auth(): _, response = authorizer.validate()", "GET * /logout * Method: GET * Erases cookies *", "* /auth * Method: GET * Validates cookies if present", "add_routes(app, authorizer): \"\"\"Adds authentication endpoints to a flask app. 
Decorates", "from dash import Dash from flask import make_response, render_template_string, redirect", "or dash.Dash The flask or dash application excluded_resources_endpoints: tuple(str) Tuple", "cookies if present or header authentication * Header: 'Authorization: DASHBOARD-AUTH", "app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout', '/logout', logout) for endpoint, function in", "if isinstance(app, Dash): app = app.server login_template = load_template('login.html') app.add_url_rule('/auth',", "must not be checked. \"\"\" def login(): ok, _ =", "authorizer): \"\"\"Adds authentication endpoints to a flask app. Decorates other", "Rejects unauthorized users Parameters ---------- app: flask.Flask or dash.Dash The", "authentication * Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets cookies on", "* Rejects unauthorized users Parameters ---------- app: flask.Flask or dash.Dash", "flask app. Decorates other endpoints to grant access. The endpoints", "def load_template(filename): \"\"\"Loads the login html template.\"\"\" pyfile_path = os.path.dirname(os.path.abspath(__file__))", "access. The endpoints are: * /login * Method: GET *", "* Method: GET * Erases cookies * /auth * Method:", "app. Decorates other endpoints to grant access. The endpoints are:", "* Validates cookies if present or header authentication * Header:", "307) return render_template_string(login_template) def logout(): _, response = authorizer.clean_cookie() return", "import join as join_path from dash import Dash from flask", "import Dash from flask import make_response, render_template_string, redirect excluded_resources_endpoints =", "import os from functools import wraps from os.path import join", "Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)' * Sets cookies on login *", "app.view_functions[endpoint] = authorize_endpoint(function) def load_template(filename): \"\"\"Loads the login html template.\"\"\"", "def auth(): _, response = authorizer.validate() return response def authorize_endpoint(function):", "join_path(pyfile_path, 'templates', filename) with open(path, 'r') as f: return f.read().strip()", "return response def authorize_endpoint(function): @wraps(function) def authorized_function(*args, **kwargs): ok, response", "= authorizer.validate() if ok: return function(*args, **kwargs) return response return", "load_template('login.html') app.add_url_rule('/auth', '/auth', auth) app.add_url_rule('/login', '/login', login) app.add_url_rule('/logout', '/logout', logout)", "* Method: GET * Validates cookies if present or header", "dash.Dash The flask or dash application excluded_resources_endpoints: tuple(str) Tuple with", "ok: return make_response(redirect('/'), 307) return render_template_string(login_template) def logout(): _, response", "users Parameters ---------- app: flask.Flask or dash.Dash The flask or", "---------- app: flask.Flask or dash.Dash The flask or dash application", "where access must not be checked. \"\"\" def login(): ok,", "( 'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout', '/_user', '/auth') def add_routes(app,", "redirect excluded_resources_endpoints = ( 'static', '_dash_assets.static', '/_favicon.ico', '/login', '/logout', '/_user',", "flask import make_response, render_template_string, redirect excluded_resources_endpoints = ( 'static', '_dash_assets.static',", "a flask app. Decorates other endpoints to grant access. The", "to grant access. 
The endpoints are: * /login * Method:", "response def authorize_endpoint(function): @wraps(function) def authorized_function(*args, **kwargs): ok, response =", "app.add_url_rule('/logout', '/logout', logout) for endpoint, function in app.view_functions.items(): if endpoint", "The endpoints are: * /login * Method: GET * /logout", "login(): ok, _ = authorizer.validate() if ok: return make_response(redirect('/'), 307)", "render_template_string(login_template) def logout(): _, response = authorizer.clean_cookie() return response def", "/login * Method: GET * /logout * Method: GET *", "Method: GET * Validates cookies if present or header authentication", "logout(): _, response = authorizer.clean_cookie() return response def auth(): _,", "join as join_path from dash import Dash from flask import", "dash application excluded_resources_endpoints: tuple(str) Tuple with endpoints where access must", "username=([^/]*)/password=([^/]*)' * Sets cookies on login * Rejects unauthorized users", "if present or header authentication * Header: 'Authorization: DASHBOARD-AUTH username=([^/]*)/password=([^/]*)'", "/auth * Method: GET * Validates cookies if present or", "logout) for endpoint, function in app.view_functions.items(): if endpoint not in", "/logout * Method: GET * Erases cookies * /auth *", "unauthorized users Parameters ---------- app: flask.Flask or dash.Dash The flask", "'/login', '/logout', '/_user', '/auth') def add_routes(app, authorizer): \"\"\"Adds authentication endpoints", "'/logout', '/_user', '/auth') def add_routes(app, authorizer): \"\"\"Adds authentication endpoints to", "other endpoints to grant access. The endpoints are: * /login", "* /logout * Method: GET * Erases cookies * /auth" ]
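A wiring sketch for add_routes. The DummyAuthorizer below is an assumption that only illustrates the interface the module relies on, namely that validate() and clean_cookie() each return an (ok, flask_response) pair; it also assumes a templates/login.html file exists next to this module.

from dash import Dash, html
from flask import make_response


class DummyAuthorizer:
    """Illustrative stand-in; a real authorizer checks cookies/headers."""

    def validate(self):
        # Always rejects, so every guarded endpoint returns 401.
        return False, make_response('Unauthorized', 401)

    def clean_cookie(self):
        # A real implementation would also expire the auth cookie here.
        return True, make_response('Logged out', 200)


app = Dash(__name__)
app.layout = html.Div('protected content')
add_routes(app, DummyAuthorizer())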
[ "name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True,", "name='dateTimeCreated', ), migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField(", "class Migration(migrations.Migration): dependencies = [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations =", "operations = [ migrations.RemoveField( model_name='order', name='dateTimeCreated', ), migrations.AlterField( model_name='order', name='_id',", "('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations = [ migrations.RemoveField( model_name='order', name='dateTimeCreated', ),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ]", "field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='price', field=models.CharField(blank=True, max_length=20, null=True),", "Migration(migrations.Migration): dependencies = [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations = [", "migrations.RemoveField( model_name='order', name='dateTimeCreated', ), migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False),", "Generated by Django 4.0.1 on 2022-04-07 01:20 from django.db import", "models class Migration(migrations.Migration): dependencies = [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations", "field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False),", "on 2022-04-07 01:20 from django.db import migrations, models class Migration(migrations.Migration):", "), migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct',", "migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='price',", "by Django 4.0.1 on 2022-04-07 01:20 from django.db import migrations,", "2022-04-07 01:20 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "] operations = [ migrations.RemoveField( model_name='order', name='dateTimeCreated', ), migrations.AlterField( model_name='order',", "4.0.1 on 2022-04-07 01:20 from django.db import migrations, models class", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('model_api',", "serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='price', field=models.CharField(blank=True, max_length=20, null=True), ), ]", "'0004_remove_order_created_remove_order_id_and_more'), ] operations = [ migrations.RemoveField( model_name='order', name='dateTimeCreated', ), migrations.AlterField(", "= [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations = [ migrations.RemoveField( model_name='order',", "name='_id', field=models.AutoField(editable=False, primary_key=True, 
serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='price', field=models.CharField(blank=True, max_length=20,", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "01:20 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "= [ migrations.RemoveField( model_name='order', name='dateTimeCreated', ), migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False,", "model_name='order', name='dateTimeCreated', ), migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ),", "serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField(", "primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='price', field=models.CharField(blank=True, max_length=20, null=True), ),", "primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ),", "[ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations = [ migrations.RemoveField( model_name='order', name='dateTimeCreated',", "[ migrations.RemoveField( model_name='order', name='dateTimeCreated', ), migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True,", "model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='_id', field=models.AutoField(editable=False,", "Django 4.0.1 on 2022-04-07 01:20 from django.db import migrations, models", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'),", "dependencies = [ ('model_api', '0004_remove_order_created_remove_order_id_and_more'), ] operations = [ migrations.RemoveField(", "model_name='orderedproduct', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='price', field=models.CharField(blank=True,", "# Generated by Django 4.0.1 on 2022-04-07 01:20 from django.db", "), migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct',", "migrations.AlterField( model_name='order', name='_id', field=models.AutoField(editable=False, primary_key=True, serialize=False), ), migrations.AlterField( model_name='orderedproduct', name='_id'," ]
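Read back as model state, these operations imply roughly the following fields; this is a sketch as it would appear inside a configured app's models.py, not taken from the project, and any other fields on Order and OrderedProduct are simply unknown here.

from django.db import models


class Order(models.Model):
    # The AlterField above makes the explicit '_id' auto field the primary key.
    _id = models.AutoField(editable=False, primary_key=True, serialize=False)


class OrderedProduct(models.Model):
    _id = models.AutoField(editable=False, primary_key=True, serialize=False)
    # Price stored as a short optional string after the AlterField.
    price = models.CharField(blank=True, max_length=20, null=True)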
[ "('pages', models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc', models.CharField(blank=True, default='', max_length=1024)), ('llcc', models.CharField(blank=True,", "('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('tags', models.CharField(blank=True, max_length=1024,", "('publisher', models.CharField(blank=True, default='', max_length=512)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)),", "related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Language', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True, default='')),", "# -*- coding: utf-8 -*- # Generated by Django 1.9", "Django 1.9 on 2015-12-21 12:22 from __future__ import unicode_literals from", "verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher',", "serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8, unique=True)), ('description', models.TextField(blank=True,", "'Protected'), (6, 'Damaged')])), ('published_on', models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue',", "to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField(", "(3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('published_on',", "], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "max_length=512)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category',", "migrations.CreateModel( name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1,", "], ), migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "default='', max_length=1024)), ('isbn', models.CharField(blank=True, default='', max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024, null=True)),", "models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')),", "django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "], ), migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField( model_name='bookdetail',", "('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True,", "-*- coding: utf-8 -*- # Generated by Django 1.9 on", "null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category', fields=[", "('slug', models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)),", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Language', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ),", "to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy',", "), migrations.CreateModel( name='Periodical', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title',", "('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)),", "= [ migrations.CreateModel( name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "('author', models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)),", "('remarks', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel(", "models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', 
models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='BookDetail',", "models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc', models.CharField(blank=True, default='', max_length=1024)), ('llcc', models.CharField(blank=True, default='',", "default='', max_length=1024)), ('llcc', models.CharField(blank=True, default='', max_length=1024)), ('isbn', models.CharField(blank=True, default='', max_length=1024)),", "related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Periodical', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('published_on', models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks',", "__future__ import unicode_literals from django.conf import settings from django.db import", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8,", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')),", "('remarks', models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on',", "migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ),", "models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='',", "('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ],", "= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ],", "2015-12-21 12:22 from __future__ import unicode_literals from django.conf import settings", "serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily", "models.CharField(blank=True, default='', max_length=512)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', 
models.DateTimeField(auto_now_add=True)), ('updated_on',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'),", "default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8, unique=True)), ('description', models.TextField(blank=True, default='')),", "migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField( model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1,", "to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ),", "('llcc', models.CharField(blank=True, default='', max_length=1024)), ('isbn', models.CharField(blank=True, default='', max_length=1024)), ('tags', models.CharField(blank=True,", "to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Language', fields=[", "name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Language',", "('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ],", "serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily", "'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6,", "serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='',", "models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)),", "('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by',", "('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8, unique=True)),", "primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True,", "primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True,", "models.CharField(blank=True, default='', max_length=1024)), ('isbn', models.CharField(blank=True, default='', max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024,", "('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Language', fields=[ ('id',", "models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PeriodicalIssue',", "import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [", "('published_on', models.DateField(blank=True, null=True)), ('pages', models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc', models.CharField(blank=True, default='',", "import migrations, models import 
django.db.models.deletion class Migration(migrations.Migration): initial = True", "1.9 on 2015-12-21 12:22 from __future__ import unicode_literals from django.conf", "('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)),", "Generated by Django 1.9 on 2015-12-21 12:22 from __future__ import", "coding: utf-8 -*- # Generated by Django 1.9 on 2015-12-21", "models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)),", "('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "], ), migrations.CreateModel( name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField( model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Periodical',", "null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "from __future__ import unicode_literals from django.conf import settings from django.db", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128,", "# Generated by Django 1.9 on 2015-12-21 12:22 from __future__", "('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('remarks', models.TextField(blank=True, default='')),", "models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True,", "null=True)), ('pages', 
models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc', models.CharField(blank=True, default='', max_length=1024)), ('llcc',", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='bookdetail',", "name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by', to=settings.AUTH_USER_MODEL),", "to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Periodical', fields=[", "('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True, max_length=1024, null=True)),", "(3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('remarks',", "to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PeriodicalIssue', fields=[", "utf-8 -*- # Generated by Django 1.9 on 2015-12-21 12:22", "models.CharField(db_index=True, max_length=8, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)),", "import settings from django.db import migrations, models import django.db.models.deletion class", "migrations.AddField( model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author',", "primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True,", "-*- # Generated by Django 1.9 on 2015-12-21 12:22 from", "models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)),", "models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies =", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='BookCopy',", "to='items.Category'), ), migrations.AddField( model_name='bookdetail', 
name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail',", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial =", "models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on',", "primary_key=True, serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3,", "models.CharField(blank=True, default='', max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on',", "verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)),", "), migrations.AddField( model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail', name='language',", "models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True,", "('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('published_on', models.DateField(blank=True, null=True)),", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True,", "to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by',", "[ migrations.CreateModel( name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('book_status',", "models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by',", "initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='BookDetail', fields=[ ('id',", "models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language',", "('title', models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, 
default='', max_length=512)), ('tags',", "to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy',", "max_length=512)), ('published_on', models.DateField(blank=True, null=True)), ('pages', models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc', models.CharField(blank=True,", "name='Language', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code',", "to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by', to=settings.AUTH_USER_MODEL), ), ]", "models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by',", "('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),", "models.DateField(blank=True, null=True)), ('pages', models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc', models.CharField(blank=True, default='', max_length=1024)),", "(4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('published_on', models.DateField(blank=True, null=True)), ('volume',", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)), ('description',", "models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by',", "name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'),", "('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField(", "default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('published_on', models.DateField(blank=True, null=True)), ('pages', models.PositiveIntegerField(blank=True,", "name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ),", "('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category', fields=[ ('id',", "from django.db import migrations, models import 
django.db.models.deletion class Migration(migrations.Migration): initial", "), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title',", "), migrations.CreateModel( name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title',", "name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'),", "by Django 1.9 on 2015-12-21 12:22 from __future__ import unicode_literals", "] operations = [ migrations.CreateModel( name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "max_length=8, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by',", "null=True)), ('ddc', models.CharField(blank=True, default='', max_length=1024)), ('llcc', models.CharField(blank=True, default='', max_length=1024)), ('isbn',", "primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8, unique=True)), ('description',", "models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'),", "'Damaged')])), ('remarks', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ),", "class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "models.CharField(blank=True, default='', max_length=512)), ('published_on', models.DateField(blank=True, null=True)), ('pages', models.PositiveIntegerField(blank=True, default=0, null=True)),", "models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='BookCopy', fields=[ ('id',", "(5, 'Protected'), (6, 'Damaged')])), ('published_on', models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)),", "'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('published_on', models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True,", "'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5,", "max_length=1024)), ('isbn', models.CharField(blank=True, default='', max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on',", "('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ],", "models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('tags', models.CharField(blank=True, max_length=1024, 
null=True)),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'),", "max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by',", "Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('published_on', models.DateField(blank=True, null=True)),", "(6, 'Damaged')])), ('published_on', models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True,", "True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "], ), migrations.CreateModel( name='Language', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='book_detail',", "default='', max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)),", "), migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('issue_status',", "default='', max_length=512)), ('published_on', models.DateField(blank=True, null=True)), ('pages', models.PositiveIntegerField(blank=True, default=0, null=True)), ('ddc',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True,", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Language', fields=[ ('id', models.AutoField(auto_created=True,", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies", "migrations.CreateModel( name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)),", "on 2015-12-21 12:22 from __future__ import unicode_literals from django.conf import", "Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True,", "('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4,", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True,", "('publisher', models.CharField(blank=True, default='', max_length=512)), ('published_on', models.DateField(blank=True, null=True)), 
('pages', models.PositiveIntegerField(blank=True, default=0,", "related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField(", "Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])),", "('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)),", "to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ),", "12:22 from __future__ import unicode_literals from django.conf import settings from", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True,", "default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by', to=settings.AUTH_USER_MODEL), ),", "'Damaged')])), ('published_on', models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)),", "('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='BookDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')),", "migrations.CreateModel( name='Periodical', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)),", "related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), 
),", "('ddc', models.CharField(blank=True, default='', max_length=1024)), ('llcc', models.CharField(blank=True, default='', max_length=1024)), ('isbn', models.CharField(blank=True,", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField( model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField(", "('name', models.CharField(max_length=512)), ('short_code', models.CharField(db_index=True, max_length=8, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on',", "max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical',", "('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Periodical')),", "models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ),", "operations = [ migrations.CreateModel( name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown',", "(6, 'Damaged')])), ('remarks', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ],", "unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "primary_key=True, serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3,", "('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('published_on', models.DateField(blank=True,", "related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('created_on', 
models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by',", "('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True, default='')), ('tags',", "to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='bookdetail', name='category',", "name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField( model_name='bookdetail', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ),", "(2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'),", "), migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField( model_name='bookdetail', name='updated_by',", "models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('published_on', models.DateField(blank=True, null=True)), ('pages',", "default=0, null=True)), ('ddc', models.CharField(blank=True, default='', max_length=1024)), ('llcc', models.CharField(blank=True, default='', max_length=1024)),", "max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category',", "name='Periodical', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('description',", "to='items.Language'), ), migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField(", "('short_code', models.CharField(db_index=True, max_length=8, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on',", "models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ),", "('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In", "'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('remarks', models.TextField(blank=True,", "models.CharField(blank=True, default='', max_length=1024)), ('llcc', models.CharField(blank=True, default='', max_length=1024)), ('isbn', models.CharField(blank=True, default='',", 
"fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'), (2,", "('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,", "('isbn', models.CharField(blank=True, default='', max_length=1024)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)),", "models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by',", "migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "name='BookCopy', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'),", "), migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'), ), migrations.AddField( model_name='bookdetail', name='created_by',", "model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),", "models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('published_on',", "models.DateTimeField(auto_now=True)), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ),", "), migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='updated_by',", "'Protected'), (6, 'Damaged')])), ('remarks', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)),", "migrations.CreateModel( name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)),", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Periodical', fields=[ ('id', models.AutoField(auto_created=True,", "('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "('language', 
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "models.DateField(blank=True, null=True)), ('volume', models.PositiveIntegerField(blank=True, null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True,", "), migrations.CreateModel( name='Language', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name',", "verbose_name='ID')), ('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'),", "verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True, default='')), ('created_on',", "('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "null=True)), ('remarks', models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)),", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='BookCopy', fields=[", "('book_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'), (4,", "null=True)), ('issue', models.PositiveIntegerField(blank=True, null=True)), ('remarks', models.TextField(blank=True, default='')), ('tags', models.CharField(blank=True, max_length=1024,", "default='')), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('created_by',", "(4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('remarks', models.TextField(blank=True, default='')), ('created_on',", "from django.conf import settings from django.db import migrations, models import", "migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField(", "('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id',", "to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField( model_name='bookdetail',", "model_name='bookcopy', name='created_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy', name='updated_by', 
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_copy_updated_by',", "name='Category', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug',", "('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ),", "('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='language_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Periodical', fields=[ ('id',", "), migrations.AddField( model_name='bookdetail', name='updated_by', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_detail_updated_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookcopy',", "serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)), ('description', models.TextField(blank=True, default='')),", "], ), migrations.CreateModel( name='Periodical', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),", "), migrations.AddField( model_name='bookcopy', name='book_detail', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.BookDetail'), ), migrations.AddField( model_name='bookcopy', name='created_by',", "max_length=1024)), ('llcc', models.CharField(blank=True, default='', max_length=1024)), ('isbn', models.CharField(blank=True, default='', max_length=1024)), ('tags',", "default='', max_length=512)), ('tags', models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)),", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Periodical', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(", "'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('remarks', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)),", "migrations.CreateModel( name='Language', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=512)),", "models.CharField(max_length=1024)), ('description', models.TextField(blank=True, default='')), ('publisher', models.CharField(blank=True, default='', max_length=512)), ('tags', models.CharField(blank=True,", "settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'), (2,", "django.conf import settings from django.db import migrations, models import django.db.models.deletion", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ('periodical', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to='items.Periodical')), ('updated_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ],", "(5, 'Protected'), (6, 'Damaged')])), ('remarks', models.TextField(blank=True, default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on',", "models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='periodical_issue_updated_by', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='bookdetail', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Category'),", "models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=1024)), ('author', models.CharField(default='Unknown', max_length=1024)), ('description',", "field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='bookdetail', name='language', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Language'), ), migrations.AddField(", "default='')), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel( name='BookDetail', fields=[", "models.CharField(blank=True, max_length=1024, null=True)), ('created_on', models.DateTimeField(auto_now_add=True)), ('updated_on', models.DateTimeField(auto_now=True)), ], ), migrations.CreateModel(", "import unicode_literals from django.conf import settings from django.db import migrations,", "to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='PeriodicalIssue', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,", "verbose_name='ID')), ('issue_status', models.IntegerField(choices=[(1, 'Available'), (2, 'In Circulation'), (3, 'Temporarily Unavailable'),", "'Temporarily Unavailable'), (4, 'Unavailable'), (5, 'Protected'), (6, 'Damaged')])), ('published_on', models.DateField(blank=True,", "unicode_literals from django.conf import settings from django.db import migrations, models", "serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=512)), ('slug', models.SlugField(max_length=128, unique=True)), ('description', models.TextField(blank=True, default=''))," ]
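The same six-state status enumeration appears on both BookCopy.book_status and PeriodicalIssue.issue_status. A minimal sketch of how the model layer could share it; the constant name and module layout are assumptions, only the integer/label pairs come from the migration, and all other fields are omitted.

# items/models.py -- hypothetical excerpt
from django.db import models

STATUS_CHOICES = (
    (1, 'Available'),
    (2, 'In Circulation'),
    (3, 'Temporarily Unavailable'),
    (4, 'Unavailable'),
    (5, 'Protected'),
    (6, 'Damaged'),
)

class BookCopy(models.Model):
    book_status = models.IntegerField(choices=STATUS_CHOICES)

class PeriodicalIssue(models.Model):
    issue_status = models.IntegerField(choices=STATUS_CHOICES)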
[ "<filename>compliance_suite/exceptions/user_config_exception.py<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\"Module compliance_suite.exceptions.user_config_exception.py This module", "module contains class definition for user config file exceptions. \"\"\"", "# -*- coding: utf-8 -*- \"\"\"Module compliance_suite.exceptions.user_config_exception.py This module contains", "-*- \"\"\"Module compliance_suite.exceptions.user_config_exception.py This module contains class definition for user", "exceptions. \"\"\" class UserConfigException(Exception): \"\"\"Exception for user config file-related errors\"\"\"", "for user config file exceptions. \"\"\" class UserConfigException(Exception): \"\"\"Exception for", "contains class definition for user config file exceptions. \"\"\" class", "user config file exceptions. \"\"\" class UserConfigException(Exception): \"\"\"Exception for user", "config file exceptions. \"\"\" class UserConfigException(Exception): \"\"\"Exception for user config", "-*- coding: utf-8 -*- \"\"\"Module compliance_suite.exceptions.user_config_exception.py This module contains class", "utf-8 -*- \"\"\"Module compliance_suite.exceptions.user_config_exception.py This module contains class definition for", "file exceptions. \"\"\" class UserConfigException(Exception): \"\"\"Exception for user config file-related", "class definition for user config file exceptions. \"\"\" class UserConfigException(Exception):", "\"\"\"Module compliance_suite.exceptions.user_config_exception.py This module contains class definition for user config", "compliance_suite.exceptions.user_config_exception.py This module contains class definition for user config file", "This module contains class definition for user config file exceptions.", "definition for user config file exceptions. \"\"\" class UserConfigException(Exception): \"\"\"Exception", "coding: utf-8 -*- \"\"\"Module compliance_suite.exceptions.user_config_exception.py This module contains class definition", "\"\"\" class UserConfigException(Exception): \"\"\"Exception for user config file-related errors\"\"\" pass" ]
[ "self._ncols @property def nrows (self): \"\"\"The number of rows in", "RiskMap.load(filename) start = Point(0, 0) end = Point(rmap.ncols - 1,", "def ncols (self): \"\"\"The number of columns in this `RiskMap`.\"\"\"", "pos.x in range(0, self.ncols) def search (rmap, start, end): \"\"\"Searches", "position `pos`, i.e. `RiskMap[pos]`.\"\"\" if self._factor > 1: risk =", "def resize (self, factor): \"\"\"Resizes this `RiskMap` by setting its", "# A: Total Risk = 3016 rmap.resize(factor=5) end = Point(rmap.ncols", "['x', 'y']) Point.__add__ = lambda self, q: Point(self[0] + q[0],", "def __init__ (self): \"\"\"Creates a new (empty) risk-level map. Individual", "rmap = RiskMap.load(filename) start = Point(0, 0) end = Point(rmap.ncols", "for p in rmap.neighbors(start) ] visited = { start }", "(rmap[p], p) for p in rmap.neighbors(start) ] visited = {", "`RiskMap.load()` \"\"\" self._factor = 1 self._levels = [ ] self._nrows", "(self, row): \"\"\"Appends `row` to this `RiskMap`.\"\"\" if len(self._levels) ==", "for `pos`ition.\"\"\" deltas = (0, -1), (0, 1), (-1, 0),", "a new (empty) risk-level map. Individual risk-levels as specific positions", "self._nrows += 1 def neighbors (self, pos): \"\"\"Iterable 4-neighbors (up,", "RiskMap: def __init__ (self): \"\"\"Creates a new (empty) risk-level map.", "(https://adventofcode.com/2021/day/15) # Author: <NAME> import collections import heapq Point =", "horizontally and vertically. \"\"\" self._factor = factor def valid (self,", "[ ] self._nrows = 0 self._ncols = 0 def __getitem__", "to `factor` copies both horizontally and vertically. \"\"\" self._factor =", "self._factor = factor def valid (self, pos): \"\"\"Indicates whether or", "risk += pos.y // self._nrows risk += pos.x // self._ncols", "\"\"\"Searches `RiskMap` `rmap` (breadth-first) to find the least risky path", "= len(row) self._levels.append(row) self._nrows += 1 def neighbors (self, pos):", "columns in this `RiskMap`.\"\"\" return self._factor * self._ncols @property def", "] self._nrows = 0 self._ncols = 0 def __getitem__ (self,", "((rmap[pos] + risk), pos) ) visited.add(pos) return risk filename =", "0: self._ncols = len(row) self._levels.append(row) self._nrows += 1 def neighbors", "q: Point(self[0] + q[0], self[1] + q[1]) class RiskMap: def", "(up, down, left, right) for `pos`ition.\"\"\" deltas = (0, -1),", "> 9: risk = risk % 9 else: risk =", "whether or not `pos` is valid (inside this `RiskMap`).\"\"\" return", "for pos in rmap.neighbors(current): if pos not in visited: heapq.heappush(", "stream: for line in stream.readlines(): rmap.append([ int(c) for c in", "pos in rmap.neighbors(current): if pos not in visited: heapq.heappush( queue,", "search (rmap, start, end): \"\"\"Searches `RiskMap` `rmap` (breadth-first) to find", "heapq.heappush( queue, ((rmap[pos] + risk), pos) ) visited.add(pos) return risk", "of any path from the top left to the bottom", "while len(queue) > 0: risk, current = heapq.heappop(queue) if current", "import collections import heapq Point = collections.namedtuple('Point', ['x', 'y']) Point.__add__", "Part 1 # # Q: Lowest total risk of any", "= 0 def __getitem__ (self, pos): \"\"\"Returns the risk-level at", "Returns the total risk of that path. 
\"\"\" risk =", "p in rmap.neighbors(start) ] visited = { start } heapq.heapify(queue)", "of rows in this `RiskMap`.\"\"\" return self._factor * self._nrows def", "append (self, row): \"\"\"Appends `row` to this `RiskMap`.\"\"\" if len(self._levels)", "return rmap @property def ncols (self): \"\"\"The number of columns", "q[1]) class RiskMap: def __init__ (self): \"\"\"Creates a new (empty)", "in rmap.neighbors(current): if pos not in visited: heapq.heappush( queue, ((rmap[pos]", "p for p in adjacent if self.valid(p) ) def resize", "pos.y in range(0, self.nrows) and pos.x in range(0, self.ncols) def", "with open(filename) as stream: for line in stream.readlines(): rmap.append([ int(c)", "Point(*delta) for delta in deltas ) yield from ( p", "total risk of that path. \"\"\" risk = 0 queue", "self._factor = 1 self._levels = [ ] self._nrows = 0", "self.ncols) def search (rmap, start, end): \"\"\"Searches `RiskMap` `rmap` (breadth-first)", "int(c) for c in line.strip() ]) return rmap @property def", "\"\"\"Appends `row` to this `RiskMap`.\"\"\" if len(self._levels) == 0: self._ncols", "to `end`. Returns the total risk of that path. \"\"\"", "this `RiskMap`.\"\"\" return self._factor * self._ncols @property def nrows (self):", "self._levels.append(row) self._nrows += 1 def neighbors (self, pos): \"\"\"Iterable 4-neighbors", "Point(self[0] + q[0], self[1] + q[1]) class RiskMap: def __init__", "and pos.x in range(0, self.ncols) def search (rmap, start, end):", "+ q[1]) class RiskMap: def __init__ (self): \"\"\"Creates a new", "@property def ncols (self): \"\"\"The number of columns in this", "as specific positions are accessible via `RiskMap[Point]`. See also `RiskMap.load()`", "rmap.neighbors(start) ] visited = { start } heapq.heapify(queue) while len(queue)", ") yield from ( p for p in adjacent if", "end: break for pos in rmap.neighbors(current): if pos not in", "valid (self, pos): \"\"\"Indicates whether or not `pos` is valid", "copies both horizontally and vertically. \"\"\" self._factor = factor def", "from the top left to the bottom right? # A:", "= 755 print(f'Part 1: Total Risk = {search(rmap, start, end):4}')", "Total Risk = {search(rmap, start, end):4}') # Part 2 #", "in line.strip() ]) return rmap @property def ncols (self): \"\"\"The", "class RiskMap: def __init__ (self): \"\"\"Creates a new (empty) risk-level", "`end`. Returns the total risk of that path. \"\"\" risk", "{ start } heapq.heapify(queue) while len(queue) > 0: risk, current", "map. Individual risk-levels as specific positions are accessible via `RiskMap[Point]`.", "0 queue = [ (rmap[p], p) for p in rmap.neighbors(start)", "risky path from `start` to `end`. Returns the total risk", "`row` to this `RiskMap`.\"\"\" if len(self._levels) == 0: self._ncols =", "+ risk), pos) ) visited.add(pos) return risk filename = 'aoc-2021-d15.txt'", "= Point(0, 0) end = Point(rmap.ncols - 1, rmap.nrows -", "+= pos.y // self._nrows risk += pos.x // self._ncols if", "'y']) Point.__add__ = lambda self, q: Point(self[0] + q[0], self[1]", "self._levels[pos.y % self._nrows][pos.x % self._ncols] risk += pos.y // self._nrows", "// self._nrows risk += pos.x // self._ncols if risk >", "self._nrows def append (self, row): \"\"\"Appends `row` to this `RiskMap`.\"\"\"", "(filename): \"\"\"Creates a new risk-level map from `filename`.\"\"\" rmap =", "pos): \"\"\"Returns the risk-level at position `pos`, i.e. 
`RiskMap[pos]`.\"\"\" if", "] visited = { start } heapq.heapify(queue) while len(queue) >", "#!/usr/bin/env python3 # Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15)", "Q: Lowest total risk of any path from the top", "row): \"\"\"Appends `row` to this `RiskMap`.\"\"\" if len(self._levels) == 0:", "# A: Total Risk = 755 print(f'Part 1: Total Risk", "9 else: risk = self._levels[pos.y][pos.x] return risk @staticmethod def load", "filename = 'aoc-2021-d15.txt' rmap = RiskMap.load(filename) start = Point(0, 0)", "right) for `pos`ition.\"\"\" deltas = (0, -1), (0, 1), (-1,", "return self._factor * self._nrows def append (self, row): \"\"\"Appends `row`", "for p in adjacent if self.valid(p) ) def resize (self,", "p) for p in rmap.neighbors(start) ] visited = { start", "deltas ) yield from ( p for p in adjacent", "`RiskMap` `rmap` (breadth-first) to find the least risky path from", "rmap.resize(factor=5) end = Point(rmap.ncols - 1, rmap.nrows - 1) print(f'Part", "`RiskMap`.\"\"\" return self._factor * self._ncols @property def nrows (self): \"\"\"The", "for delta in deltas ) yield from ( p for", "self.nrows) and pos.x in range(0, self.ncols) def search (rmap, start,", "collections.namedtuple('Point', ['x', 'y']) Point.__add__ = lambda self, q: Point(self[0] +", "queue, ((rmap[pos] + risk), pos) ) visited.add(pos) return risk filename", "the risk-level at position `pos`, i.e. `RiskMap[pos]`.\"\"\" if self._factor >", "Total Risk = 755 print(f'Part 1: Total Risk = {search(rmap,", "def valid (self, pos): \"\"\"Indicates whether or not `pos` is", "% self._ncols] risk += pos.y // self._nrows risk += pos.x", "collections import heapq Point = collections.namedtuple('Point', ['x', 'y']) Point.__add__ =", "q[0], self[1] + q[1]) class RiskMap: def __init__ (self): \"\"\"Creates", "visited = { start } heapq.heapify(queue) while len(queue) > 0:", "current = heapq.heappop(queue) if current == end: break for pos", "`pos`, i.e. `RiskMap[pos]`.\"\"\" if self._factor > 1: risk = self._levels[pos.y", "end = Point(rmap.ncols - 1, rmap.nrows - 1) # Part", "bottom right? # A: Total Risk = 755 print(f'Part 1:", "+ Point(*delta) for delta in deltas ) yield from (", "if pos not in visited: heapq.heappush( queue, ((rmap[pos] + risk),", "to the bottom right? # A: Total Risk = 3016", "rows in this `RiskMap`.\"\"\" return self._factor * self._nrows def append", "`rmap` (breadth-first) to find the least risky path from `start`", "pos.x // self._ncols if risk > 9: risk = risk", "print(f'Part 1: Total Risk = {search(rmap, start, end):4}') # Part", "for c in line.strip() ]) return rmap @property def ncols", "path from the top left to the bottom right? #", "at position `pos`, i.e. 
`RiskMap[pos]`.\"\"\" if self._factor > 1: risk", "= [ ] self._nrows = 0 self._ncols = 0 def", "Risk = 755 print(f'Part 1: Total Risk = {search(rmap, start,", "# Part 1 # # Q: Lowest total risk of", "+= pos.x // self._ncols if risk > 9: risk =", "1: Total Risk = {search(rmap, start, end):4}') # Part 2", "0 def __getitem__ (self, pos): \"\"\"Returns the risk-level at position", "neighbors (self, pos): \"\"\"Iterable 4-neighbors (up, down, left, right) for", "= 'aoc-2021-d15.txt' rmap = RiskMap.load(filename) start = Point(0, 0) end", "<NAME> import collections import heapq Point = collections.namedtuple('Point', ['x', 'y'])", "risk += pos.x // self._ncols if risk > 9: risk", "1 def neighbors (self, pos): \"\"\"Iterable 4-neighbors (up, down, left,", "[ (rmap[p], p) for p in rmap.neighbors(start) ] visited =", "Risk = 3016 rmap.resize(factor=5) end = Point(rmap.ncols - 1, rmap.nrows", "risk = risk % 9 else: risk = self._levels[pos.y][pos.x] return", "'aoc-2021-d15.txt' rmap = RiskMap.load(filename) start = Point(0, 0) end =", "that path. \"\"\" risk = 0 queue = [ (rmap[p],", "Point = collections.namedtuple('Point', ['x', 'y']) Point.__add__ = lambda self, q:", "(-1, 0), (1, 0) adjacent = ( pos + Point(*delta)", "`pos` is valid (inside this `RiskMap`).\"\"\" return pos.y in range(0,", "= Point(rmap.ncols - 1, rmap.nrows - 1) print(f'Part 2: Total", "(self, factor): \"\"\"Resizes this `RiskMap` by setting its expansion factor", "-1), (0, 1), (-1, 0), (1, 0) adjacent = (", "from `start` to `end`. Returns the total risk of that", "def search (rmap, start, end): \"\"\"Searches `RiskMap` `rmap` (breadth-first) to", "and vertically. \"\"\" self._factor = factor def valid (self, pos):", "a new risk-level map from `filename`.\"\"\" rmap = RiskMap() with", "(self, pos): \"\"\"Iterable 4-neighbors (up, down, left, right) for `pos`ition.\"\"\"", "in stream.readlines(): rmap.append([ int(c) for c in line.strip() ]) return", "@property def nrows (self): \"\"\"The number of rows in this", "rmap.nrows - 1) print(f'Part 2: Total Risk = {search(rmap, start,", "(self, pos): \"\"\"Indicates whether or not `pos` is valid (inside", "self._nrows = 0 self._ncols = 0 def __getitem__ (self, pos):", "= RiskMap() with open(filename) as stream: for line in stream.readlines():", "risk-level map from `filename`.\"\"\" rmap = RiskMap() with open(filename) as", "rmap = RiskMap() with open(filename) as stream: for line in", "def nrows (self): \"\"\"The number of rows in this `RiskMap`.\"\"\"", "`RiskMap`.\"\"\" if len(self._levels) == 0: self._ncols = len(row) self._levels.append(row) self._nrows", "import heapq Point = collections.namedtuple('Point', ['x', 'y']) Point.__add__ = lambda", "by setting its expansion factor to `factor` copies both horizontally", "vertically. \"\"\" self._factor = factor def valid (self, pos): \"\"\"Indicates", "right? # A: Total Risk = 755 print(f'Part 1: Total", "self, q: Point(self[0] + q[0], self[1] + q[1]) class RiskMap:", "\"\"\"Returns the risk-level at position `pos`, i.e. `RiskMap[pos]`.\"\"\" if self._factor", "1 self._levels = [ ] self._nrows = 0 self._ncols =", "in adjacent if self.valid(p) ) def resize (self, factor): \"\"\"Resizes", "Point(rmap.ncols - 1, rmap.nrows - 1) print(f'Part 2: Total Risk", "path. \"\"\" risk = 0 queue = [ (rmap[p], p)", "(self, pos): \"\"\"Returns the risk-level at position `pos`, i.e. 
`RiskMap[pos]`.\"\"\"", "rmap.append([ int(c) for c in line.strip() ]) return rmap @property", "heapq.heappop(queue) if current == end: break for pos in rmap.neighbors(current):", "Total Risk = 3016 rmap.resize(factor=5) end = Point(rmap.ncols - 1,", "adjacent = ( pos + Point(*delta) for delta in deltas", "rmap.neighbors(current): if pos not in visited: heapq.heappush( queue, ((rmap[pos] +", "- 1) print(f'Part 2: Total Risk = {search(rmap, start, end)}')", "\"\"\" risk = 0 queue = [ (rmap[p], p) for", "ncols (self): \"\"\"The number of columns in this `RiskMap`.\"\"\" return", "the bottom right? # A: Total Risk = 3016 rmap.resize(factor=5)", "= 0 queue = [ (rmap[p], p) for p in", "to the bottom right? # A: Total Risk = 755", "end):4}') # Part 2 # # Q: Lowest total risk", "# Q: Lowest total risk of any path from the", "= [ (rmap[p], p) for p in rmap.neighbors(start) ] visited", "Point(0, 0) end = Point(rmap.ncols - 1, rmap.nrows - 1)", "0) end = Point(rmap.ncols - 1, rmap.nrows - 1) #", "resize (self, factor): \"\"\"Resizes this `RiskMap` by setting its expansion", "return self._factor * self._ncols @property def nrows (self): \"\"\"The number", "this `RiskMap`.\"\"\" if len(self._levels) == 0: self._ncols = len(row) self._levels.append(row)", "break for pos in rmap.neighbors(current): if pos not in visited:", "pos) ) visited.add(pos) return risk filename = 'aoc-2021-d15.txt' rmap =", "as stream: for line in stream.readlines(): rmap.append([ int(c) for c", "0 self._ncols = 0 def __getitem__ (self, pos): \"\"\"Returns the", "15 (https://adventofcode.com/2021/day/15) # Author: <NAME> import collections import heapq Point", "= { start } heapq.heapify(queue) while len(queue) > 0: risk,", "in range(0, self.nrows) and pos.x in range(0, self.ncols) def search", "start, end): \"\"\"Searches `RiskMap` `rmap` (breadth-first) to find the least", "\"\"\" self._factor = factor def valid (self, pos): \"\"\"Indicates whether", "risk @staticmethod def load (filename): \"\"\"Creates a new risk-level map", "risk = self._levels[pos.y % self._nrows][pos.x % self._ncols] risk += pos.y", "# Part 2 # # Q: Lowest total risk of", "risk filename = 'aoc-2021-d15.txt' rmap = RiskMap.load(filename) start = Point(0,", "in deltas ) yield from ( p for p in", "self._nrows risk += pos.x // self._ncols if risk > 9:", "risk), pos) ) visited.add(pos) return risk filename = 'aoc-2021-d15.txt' rmap", "yield from ( p for p in adjacent if self.valid(p)", "# Author: <NAME> import collections import heapq Point = collections.namedtuple('Point',", "(inside this `RiskMap`).\"\"\" return pos.y in range(0, self.nrows) and pos.x", "]) return rmap @property def ncols (self): \"\"\"The number of", "of Code 2021, Day 15 (https://adventofcode.com/2021/day/15) # Author: <NAME> import", "right? # A: Total Risk = 3016 rmap.resize(factor=5) end =", "queue = [ (rmap[p], p) for p in rmap.neighbors(start) ]", "delta in deltas ) yield from ( p for p", "`RiskMap[Point]`. See also `RiskMap.load()` \"\"\" self._factor = 1 self._levels =", "A: Total Risk = 3016 rmap.resize(factor=5) end = Point(rmap.ncols -", "1, rmap.nrows - 1) # Part 1 # # Q:", "pos): \"\"\"Indicates whether or not `pos` is valid (inside this", "top left to the bottom right? 
# A: Total Risk", "= self._levels[pos.y % self._nrows][pos.x % self._ncols] risk += pos.y //", "self._nrows][pos.x % self._ncols] risk += pos.y // self._nrows risk +=", "left, right) for `pos`ition.\"\"\" deltas = (0, -1), (0, 1),", "+ q[0], self[1] + q[1]) class RiskMap: def __init__ (self):", "{search(rmap, start, end):4}') # Part 2 # # Q: Lowest", "risk % 9 else: risk = self._levels[pos.y][pos.x] return risk @staticmethod", "from `filename`.\"\"\" rmap = RiskMap() with open(filename) as stream: for", "- 1, rmap.nrows - 1) print(f'Part 2: Total Risk =", "new (empty) risk-level map. Individual risk-levels as specific positions are", "} heapq.heapify(queue) while len(queue) > 0: risk, current = heapq.heappop(queue)", "1 # # Q: Lowest total risk of any path", "least risky path from `start` to `end`. Returns the total", "Author: <NAME> import collections import heapq Point = collections.namedtuple('Point', ['x',", "in this `RiskMap`.\"\"\" return self._factor * self._nrows def append (self,", "bottom right? # A: Total Risk = 3016 rmap.resize(factor=5) end", "= {search(rmap, start, end):4}') # Part 2 # # Q:", "* self._ncols @property def nrows (self): \"\"\"The number of rows", "\"\"\"Indicates whether or not `pos` is valid (inside this `RiskMap`).\"\"\"", "in visited: heapq.heappush( queue, ((rmap[pos] + risk), pos) ) visited.add(pos)", "= 1 self._levels = [ ] self._nrows = 0 self._ncols", "(self): \"\"\"Creates a new (empty) risk-level map. Individual risk-levels as", "- 1, rmap.nrows - 1) # Part 1 # #", "return pos.y in range(0, self.nrows) and pos.x in range(0, self.ncols)", "to this `RiskMap`.\"\"\" if len(self._levels) == 0: self._ncols = len(row)", "def __getitem__ (self, pos): \"\"\"Returns the risk-level at position `pos`,", "def load (filename): \"\"\"Creates a new risk-level map from `filename`.\"\"\"", "= heapq.heappop(queue) if current == end: break for pos in", "is valid (inside this `RiskMap`).\"\"\" return pos.y in range(0, self.nrows)", "factor def valid (self, pos): \"\"\"Indicates whether or not `pos`", "not in visited: heapq.heappush( queue, ((rmap[pos] + risk), pos) )", "`RiskMap[pos]`.\"\"\" if self._factor > 1: risk = self._levels[pos.y % self._nrows][pos.x", "risk of any path from the top left to the", "range(0, self.ncols) def search (rmap, start, end): \"\"\"Searches `RiskMap` `rmap`", "risk = 0 queue = [ (rmap[p], p) for p", "number of rows in this `RiskMap`.\"\"\" return self._factor * self._nrows", "1) # Part 1 # # Q: Lowest total risk", "`filename`.\"\"\" rmap = RiskMap() with open(filename) as stream: for line", "accessible via `RiskMap[Point]`. See also `RiskMap.load()` \"\"\" self._factor = 1", "this `RiskMap`.\"\"\" return self._factor * self._nrows def append (self, row):", "risk-level map. 
Individual risk-levels as specific positions are accessible via", "pos + Point(*delta) for delta in deltas ) yield from", "if current == end: break for pos in rmap.neighbors(current): if", "== end: break for pos in rmap.neighbors(current): if pos not", "+= 1 def neighbors (self, pos): \"\"\"Iterable 4-neighbors (up, down,", "( pos + Point(*delta) for delta in deltas ) yield", "self._ncols if risk > 9: risk = risk % 9", "pos.y // self._nrows risk += pos.x // self._ncols if risk", "factor): \"\"\"Resizes this `RiskMap` by setting its expansion factor to", "start = Point(0, 0) end = Point(rmap.ncols - 1, rmap.nrows", "= lambda self, q: Point(self[0] + q[0], self[1] + q[1])", "map from `filename`.\"\"\" rmap = RiskMap() with open(filename) as stream:", "`factor` copies both horizontally and vertically. \"\"\" self._factor = factor", "* self._nrows def append (self, row): \"\"\"Appends `row` to this", "total risk of any path from the top left to", "also `RiskMap.load()` \"\"\" self._factor = 1 self._levels = [ ]", "c in line.strip() ]) return rmap @property def ncols (self):", "# Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15) # Author:", "\"\"\"Creates a new (empty) risk-level map. Individual risk-levels as specific", "path from `start` to `end`. Returns the total risk of", "\"\"\"The number of rows in this `RiskMap`.\"\"\" return self._factor *", "self._factor * self._nrows def append (self, row): \"\"\"Appends `row` to", "2 # # Q: Lowest total risk of any path", "<reponame>bbornstein/aoc #!/usr/bin/env python3 # Advent of Code 2021, Day 15", "i.e. `RiskMap[pos]`.\"\"\" if self._factor > 1: risk = self._levels[pos.y %", "Part 2 # # Q: Lowest total risk of any", "in range(0, self.ncols) def search (rmap, start, end): \"\"\"Searches `RiskMap`", "for line in stream.readlines(): rmap.append([ int(c) for c in line.strip()", "# # Q: Lowest total risk of any path from", "its expansion factor to `factor` copies both horizontally and vertically.", "left to the bottom right? # A: Total Risk =", "both horizontally and vertically. \"\"\" self._factor = factor def valid", "if risk > 9: risk = risk % 9 else:", "rmap.nrows - 1) # Part 1 # # Q: Lowest", "= factor def valid (self, pos): \"\"\"Indicates whether or not", "@staticmethod def load (filename): \"\"\"Creates a new risk-level map from", "python3 # Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15) #", "Code 2021, Day 15 (https://adventofcode.com/2021/day/15) # Author: <NAME> import collections", "- 1) # Part 1 # # Q: Lowest total", "new risk-level map from `filename`.\"\"\" rmap = RiskMap() with open(filename)", "self._levels = [ ] self._nrows = 0 self._ncols = 0", "\"\"\"Iterable 4-neighbors (up, down, left, right) for `pos`ition.\"\"\" deltas =", "def append (self, row): \"\"\"Appends `row` to this `RiskMap`.\"\"\" if", "in rmap.neighbors(start) ] visited = { start } heapq.heapify(queue) while", "= ( pos + Point(*delta) for delta in deltas )", "`pos`ition.\"\"\" deltas = (0, -1), (0, 1), (-1, 0), (1,", "of columns in this `RiskMap`.\"\"\" return self._factor * self._ncols @property", "visited.add(pos) return risk filename = 'aoc-2021-d15.txt' rmap = RiskMap.load(filename) start", "self._ncols] risk += pos.y // self._nrows risk += pos.x //", "the least risky path from `start` to `end`. 
Returns the", "self.valid(p) ) def resize (self, factor): \"\"\"Resizes this `RiskMap` by", "(rmap, start, end): \"\"\"Searches `RiskMap` `rmap` (breadth-first) to find the", "( p for p in adjacent if self.valid(p) ) def", "RiskMap() with open(filename) as stream: for line in stream.readlines(): rmap.append([", "rmap @property def ncols (self): \"\"\"The number of columns in", "else: risk = self._levels[pos.y][pos.x] return risk @staticmethod def load (filename):", "via `RiskMap[Point]`. See also `RiskMap.load()` \"\"\" self._factor = 1 self._levels", "(self): \"\"\"The number of columns in this `RiskMap`.\"\"\" return self._factor", "adjacent if self.valid(p) ) def resize (self, factor): \"\"\"Resizes this", "risk of that path. \"\"\" risk = 0 queue =", "len(queue) > 0: risk, current = heapq.heappop(queue) if current ==", "1), (-1, 0), (1, 0) adjacent = ( pos +", "Individual risk-levels as specific positions are accessible via `RiskMap[Point]`. See", "9: risk = risk % 9 else: risk = self._levels[pos.y][pos.x]", "pos not in visited: heapq.heappush( queue, ((rmap[pos] + risk), pos)", "% 9 else: risk = self._levels[pos.y][pos.x] return risk @staticmethod def", "if self.valid(p) ) def resize (self, factor): \"\"\"Resizes this `RiskMap`", "the bottom right? # A: Total Risk = 755 print(f'Part", "this `RiskMap`).\"\"\" return pos.y in range(0, self.nrows) and pos.x in", "== 0: self._ncols = len(row) self._levels.append(row) self._nrows += 1 def", "down, left, right) for `pos`ition.\"\"\" deltas = (0, -1), (0,", "(empty) risk-level map. Individual risk-levels as specific positions are accessible", "len(self._levels) == 0: self._ncols = len(row) self._levels.append(row) self._nrows += 1", "end): \"\"\"Searches `RiskMap` `rmap` (breadth-first) to find the least risky", "0) adjacent = ( pos + Point(*delta) for delta in", "\"\"\" self._factor = 1 self._levels = [ ] self._nrows =", "not `pos` is valid (inside this `RiskMap`).\"\"\" return pos.y in", "heapq Point = collections.namedtuple('Point', ['x', 'y']) Point.__add__ = lambda self,", "deltas = (0, -1), (0, 1), (-1, 0), (1, 0)", "Risk = {search(rmap, start, end):4}') # Part 2 # #", "risk > 9: risk = risk % 9 else: risk", "start } heapq.heapify(queue) while len(queue) > 0: risk, current =", ") visited.add(pos) return risk filename = 'aoc-2021-d15.txt' rmap = RiskMap.load(filename)", "risk = self._levels[pos.y][pos.x] return risk @staticmethod def load (filename): \"\"\"Creates", "`RiskMap`).\"\"\" return pos.y in range(0, self.nrows) and pos.x in range(0,", "Point.__add__ = lambda self, q: Point(self[0] + q[0], self[1] +", "in this `RiskMap`.\"\"\" return self._factor * self._ncols @property def nrows", "self._levels[pos.y][pos.x] return risk @staticmethod def load (filename): \"\"\"Creates a new", "= RiskMap.load(filename) start = Point(0, 0) end = Point(rmap.ncols -", "See also `RiskMap.load()` \"\"\" self._factor = 1 self._levels = [", "(1, 0) adjacent = ( pos + Point(*delta) for delta", "p in adjacent if self.valid(p) ) def resize (self, factor):", "2021, Day 15 (https://adventofcode.com/2021/day/15) # Author: <NAME> import collections import", "number of columns in this `RiskMap`.\"\"\" return self._factor * self._ncols", "= 3016 rmap.resize(factor=5) end = Point(rmap.ncols - 1, rmap.nrows -", "> 0: risk, current = heapq.heappop(queue) if current == end:", "if self._factor > 1: risk = self._levels[pos.y % self._nrows][pos.x %", "return risk @staticmethod def load (filename): \"\"\"Creates a new risk-level", "line.strip() ]) return rmap 
@property def ncols (self): \"\"\"The number", "valid (inside this `RiskMap`).\"\"\" return pos.y in range(0, self.nrows) and", "(0, -1), (0, 1), (-1, 0), (1, 0) adjacent =", "specific positions are accessible via `RiskMap[Point]`. See also `RiskMap.load()` \"\"\"", "setting its expansion factor to `factor` copies both horizontally and", "= (0, -1), (0, 1), (-1, 0), (1, 0) adjacent", "start, end):4}') # Part 2 # # Q: Lowest total", "lambda self, q: Point(self[0] + q[0], self[1] + q[1]) class", "__getitem__ (self, pos): \"\"\"Returns the risk-level at position `pos`, i.e.", "risk-levels as specific positions are accessible via `RiskMap[Point]`. See also", "755 print(f'Part 1: Total Risk = {search(rmap, start, end):4}') #", "nrows (self): \"\"\"The number of rows in this `RiskMap`.\"\"\" return", "(self): \"\"\"The number of rows in this `RiskMap`.\"\"\" return self._factor", "risk-level at position `pos`, i.e. `RiskMap[pos]`.\"\"\" if self._factor > 1:", "Advent of Code 2021, Day 15 (https://adventofcode.com/2021/day/15) # Author: <NAME>", "line in stream.readlines(): rmap.append([ int(c) for c in line.strip() ])", "1: risk = self._levels[pos.y % self._nrows][pos.x % self._ncols] risk +=", "// self._ncols if risk > 9: risk = risk %", "\"\"\"Resizes this `RiskMap` by setting its expansion factor to `factor`", "factor to `factor` copies both horizontally and vertically. \"\"\" self._factor", "range(0, self.nrows) and pos.x in range(0, self.ncols) def search (rmap,", "heapq.heapify(queue) while len(queue) > 0: risk, current = heapq.heappop(queue) if", "% self._nrows][pos.x % self._ncols] risk += pos.y // self._nrows risk", "of that path. \"\"\" risk = 0 queue = [", "expansion factor to `factor` copies both horizontally and vertically. \"\"\"", "this `RiskMap` by setting its expansion factor to `factor` copies", "A: Total Risk = 755 print(f'Part 1: Total Risk =", "= collections.namedtuple('Point', ['x', 'y']) Point.__add__ = lambda self, q: Point(self[0]", "self._factor * self._ncols @property def nrows (self): \"\"\"The number of", "end = Point(rmap.ncols - 1, rmap.nrows - 1) print(f'Part 2:", "def neighbors (self, pos): \"\"\"Iterable 4-neighbors (up, down, left, right)", "current == end: break for pos in rmap.neighbors(current): if pos", "the total risk of that path. \"\"\" risk = 0", "from ( p for p in adjacent if self.valid(p) )", ") def resize (self, factor): \"\"\"Resizes this `RiskMap` by setting", "Point(rmap.ncols - 1, rmap.nrows - 1) # Part 1 #", "self._ncols = 0 def __getitem__ (self, pos): \"\"\"Returns the risk-level", "pos): \"\"\"Iterable 4-neighbors (up, down, left, right) for `pos`ition.\"\"\" deltas", "open(filename) as stream: for line in stream.readlines(): rmap.append([ int(c) for", "\"\"\"Creates a new risk-level map from `filename`.\"\"\" rmap = RiskMap()", "return risk filename = 'aoc-2021-d15.txt' rmap = RiskMap.load(filename) start =", "to find the least risky path from `start` to `end`.", "find the least risky path from `start` to `end`. 
Returns", "visited: heapq.heappush( queue, ((rmap[pos] + risk), pos) ) visited.add(pos) return", "(0, 1), (-1, 0), (1, 0) adjacent = ( pos", "Lowest total risk of any path from the top left", "stream.readlines(): rmap.append([ int(c) for c in line.strip() ]) return rmap", "\"\"\"The number of columns in this `RiskMap`.\"\"\" return self._factor *", "load (filename): \"\"\"Creates a new risk-level map from `filename`.\"\"\" rmap", "= 0 self._ncols = 0 def __getitem__ (self, pos): \"\"\"Returns", "4-neighbors (up, down, left, right) for `pos`ition.\"\"\" deltas = (0,", "3016 rmap.resize(factor=5) end = Point(rmap.ncols - 1, rmap.nrows - 1)", "are accessible via `RiskMap[Point]`. See also `RiskMap.load()` \"\"\" self._factor =", "= Point(rmap.ncols - 1, rmap.nrows - 1) # Part 1", "Day 15 (https://adventofcode.com/2021/day/15) # Author: <NAME> import collections import heapq", "0: risk, current = heapq.heappop(queue) if current == end: break", "0), (1, 0) adjacent = ( pos + Point(*delta) for", "= risk % 9 else: risk = self._levels[pos.y][pos.x] return risk", "any path from the top left to the bottom right?", "1, rmap.nrows - 1) print(f'Part 2: Total Risk = {search(rmap,", "`start` to `end`. Returns the total risk of that path.", "positions are accessible via `RiskMap[Point]`. See also `RiskMap.load()` \"\"\" self._factor", "= self._levels[pos.y][pos.x] return risk @staticmethod def load (filename): \"\"\"Creates a", "len(row) self._levels.append(row) self._nrows += 1 def neighbors (self, pos): \"\"\"Iterable", "`RiskMap`.\"\"\" return self._factor * self._nrows def append (self, row): \"\"\"Appends", "the top left to the bottom right? # A: Total", "> 1: risk = self._levels[pos.y % self._nrows][pos.x % self._ncols] risk", "__init__ (self): \"\"\"Creates a new (empty) risk-level map. Individual risk-levels", "self._ncols = len(row) self._levels.append(row) self._nrows += 1 def neighbors (self,", "if len(self._levels) == 0: self._ncols = len(row) self._levels.append(row) self._nrows +=", "self._factor > 1: risk = self._levels[pos.y % self._nrows][pos.x % self._ncols]", "or not `pos` is valid (inside this `RiskMap`).\"\"\" return pos.y", "self[1] + q[1]) class RiskMap: def __init__ (self): \"\"\"Creates a", "(breadth-first) to find the least risky path from `start` to", "`RiskMap` by setting its expansion factor to `factor` copies both", "risk, current = heapq.heappop(queue) if current == end: break for" ]
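# A small self-contained check (assumed example, not from the original file)
# of the tiling arithmetic in RiskMap.__getitem__: after resize(factor=5), a
# single cell of level 8 is shifted by its tile offsets and wrapped back into
# range, e.g. 8 + 4 + 4 = 16 -> 16 % 9 = 7.

tiny = RiskMap()
tiny.append([8])
tiny.resize(factor=5)
assert tiny[Point(0, 0)] == 8              # original tile, no shift
assert tiny[Point(4, 4)] == 7              # shifted by 4 + 4, wrapped past 9
assert tiny.ncols == 5 and tiny.nrows == 5 # expansion applies to both axes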
[ "modify it under the terms of the MIT License; see", "it and/or # modify it under the terms of the", "software; you can redistribute it and/or # modify it under", "# modify it under the terms of the MIT License;", "free software; you can redistribute it and/or # modify it", "terms of the MIT License; see the # LICENSE file", "import redirect from indico.modules.events.abstracts.models.abstracts import Abstract from indico.web.flask.util import url_for", "from indico.web.flask.util import url_for from indico.web.rh import RHSimple @RHSimple.wrap_function def", "you can redistribute it and/or # modify it under the", "can redistribute it and/or # modify it under the terms", "redistribute it and/or # modify it under the terms of", "indico.web.flask.util import url_for from indico.web.rh import RHSimple @RHSimple.wrap_function def compat_abstract(endpoint,", "@RHSimple.wrap_function def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False): abstract = Abstract.find(event_id=confId,", "import RHSimple @RHSimple.wrap_function def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False): abstract", "This file is part of Indico. # Copyright (C) 2002", "it under the terms of the MIT License; see the", "is free software; you can redistribute it and/or # modify", "for more details. from flask import redirect from indico.modules.events.abstracts.models.abstracts import", "CERN # # Indico is free software; you can redistribute", "see the # LICENSE file for more details. from flask", "(C) 2002 - 2020 CERN # # Indico is free", "abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404() return redirect(url_for('abstracts.' + endpoint, abstract, management=management))", "# This file is part of Indico. # Copyright (C)", "import Abstract from indico.web.flask.util import url_for from indico.web.rh import RHSimple", "# LICENSE file for more details. from flask import redirect", "compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False): abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404() return", "more details. from flask import redirect from indico.modules.events.abstracts.models.abstracts import Abstract", "confId, friendly_id, track_id=None, management=False): abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404() return redirect(url_for('abstracts.'", "2002 - 2020 CERN # # Indico is free software;", "of Indico. # Copyright (C) 2002 - 2020 CERN #", "file for more details. from flask import redirect from indico.modules.events.abstracts.models.abstracts", "details. from flask import redirect from indico.modules.events.abstracts.models.abstracts import Abstract from", "import url_for from indico.web.rh import RHSimple @RHSimple.wrap_function def compat_abstract(endpoint, confId,", "MIT License; see the # LICENSE file for more details.", "the # LICENSE file for more details. from flask import", "RHSimple @RHSimple.wrap_function def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False): abstract =", "track_id=None, management=False): abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404() return redirect(url_for('abstracts.' + endpoint,", "- 2020 CERN # # Indico is free software; you", "Abstract from indico.web.flask.util import url_for from indico.web.rh import RHSimple @RHSimple.wrap_function", "Indico. 
# Copyright (C) 2002 - 2020 CERN # #", "the MIT License; see the # LICENSE file for more", "from flask import redirect from indico.modules.events.abstracts.models.abstracts import Abstract from indico.web.flask.util", "License; see the # LICENSE file for more details. from", "is part of Indico. # Copyright (C) 2002 - 2020", "from indico.modules.events.abstracts.models.abstracts import Abstract from indico.web.flask.util import url_for from indico.web.rh", "under the terms of the MIT License; see the #", "Indico is free software; you can redistribute it and/or #", "from indico.web.rh import RHSimple @RHSimple.wrap_function def compat_abstract(endpoint, confId, friendly_id, track_id=None,", "indico.web.rh import RHSimple @RHSimple.wrap_function def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False):", "def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False): abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404()", "redirect from indico.modules.events.abstracts.models.abstracts import Abstract from indico.web.flask.util import url_for from", "LICENSE file for more details. from flask import redirect from", "and/or # modify it under the terms of the MIT", "# # Indico is free software; you can redistribute it", "2020 CERN # # Indico is free software; you can", "file is part of Indico. # Copyright (C) 2002 -", "of the MIT License; see the # LICENSE file for", "# Copyright (C) 2002 - 2020 CERN # # Indico", "# Indico is free software; you can redistribute it and/or", "management=False): abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404() return redirect(url_for('abstracts.' + endpoint, abstract,", "Copyright (C) 2002 - 2020 CERN # # Indico is", "url_for from indico.web.rh import RHSimple @RHSimple.wrap_function def compat_abstract(endpoint, confId, friendly_id,", "part of Indico. # Copyright (C) 2002 - 2020 CERN", "indico.modules.events.abstracts.models.abstracts import Abstract from indico.web.flask.util import url_for from indico.web.rh import", "flask import redirect from indico.modules.events.abstracts.models.abstracts import Abstract from indico.web.flask.util import", "friendly_id, track_id=None, management=False): abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404() return redirect(url_for('abstracts.' +", "the terms of the MIT License; see the # LICENSE" ]
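# A minimal mounting sketch (assumed; Indico registers these shims through its
# own compat-blueprint machinery, not like this). The route and the
# 'display_abstract' endpoint name are illustrative placeholders: the endpoint
# is fixed with functools.partial and the URL supplies the remaining
# parameters of compat_abstract.

import functools

def register_compat_routes(blueprint):
    blueprint.add_url_rule(
        '/event/<int:confId>/call-for-abstracts/<int:friendly_id>/',
        'compat_abstract',
        functools.partial(compat_abstract, 'display_abstract'),
    )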
[ "64 encoded value of the primary read-write key. :vartype primary_master_key:", "str :ivar secondary_master_key: Base 64 encoded value of the secondary", "key. :vartype secondary_readonly_master_key: str :ivar primary_master_key: Base 64 encoded value", "key. :vartype secondary_master_key: str \"\"\" _validation = { 'primary_readonly_master_key': {'readonly':", "'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'},", "the MIT License. See License.txt in the project root for", "the secondary read-write key. :vartype secondary_master_key: str \"\"\" _validation =", "access keys for the given database account. Variables are only", "{'readonly': True}, 'primary_master_key': {'readonly': True}, 'secondary_master_key': {'readonly': True}, } _attribute_map", "the project root for # license information. # # Code", "the primary read-write key. :vartype primary_master_key: str :ivar secondary_master_key: Base", ":ivar primary_readonly_master_key: Base 64 encoded value of the primary read-only", "__init__(self, **kwargs) -> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs) self.primary_master_key = None self.secondary_master_key", "the secondary read-only key. :vartype secondary_readonly_master_key: str :ivar primary_master_key: Base", "only populated by the server, and will be ignored when", "be ignored when sending a request. :ivar primary_readonly_master_key: Base 64", "request. :ivar primary_readonly_master_key: Base 64 encoded value of the primary", "code is # regenerated. # -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult", "Generator. # Changes may cause incorrect behavior and will be", ".database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys for the", "'secondaryMasterKey', 'type': 'str'}, } def __init__(self, **kwargs) -> None: super(DatabaseAccountListKeysResult,", "True}, 'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key': {'readonly': True}, 'secondary_master_key': {'readonly': True},", "-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #", "license information. # # Code generated by Microsoft (R) AutoRest", "Variables are only populated by the server, and will be", "by Microsoft (R) AutoRest Code Generator. # Changes may cause", "encoded value of the secondary read-only key. 
:vartype secondary_readonly_master_key: str", "Changes may cause incorrect behavior and will be lost if", "{ 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type':", "= { 'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key': {'readonly':", "behavior and will be lost if the code is #", "'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'}, } def __init__(self, **kwargs)", "{'key': 'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key':", "incorrect behavior and will be lost if the code is", "'secondary_master_key': {'readonly': True}, } _attribute_map = { 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey',", ":vartype primary_readonly_master_key: str :ivar secondary_readonly_master_key: Base 64 encoded value of", "when sending a request. :ivar primary_readonly_master_key: Base 64 encoded value", "'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key': {'readonly': True}, 'secondary_master_key':", "value of the primary read-only key. :vartype primary_readonly_master_key: str :ivar", "MIT License. See License.txt in the project root for #", "AutoRest Code Generator. # Changes may cause incorrect behavior and", "secondary_readonly_master_key: Base 64 encoded value of the secondary read-only key.", "server, and will be ignored when sending a request. :ivar", "encoded value of the primary read-only key. :vartype primary_readonly_master_key: str", "'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'},", "may cause incorrect behavior and will be lost if the", "'str'}, 'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey', 'type':", "of the secondary read-only key. :vartype secondary_readonly_master_key: str :ivar primary_master_key:", "'primaryMasterKey', 'type': 'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'}, } def", "{'key': 'primaryMasterKey', 'type': 'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'}, }", "project root for # license information. # # Code generated", "'type': 'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'}, } def __init__(self,", "See License.txt in the project root for # license information.", "**kwargs) -> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs) self.primary_master_key = None self.secondary_master_key =", "{ 'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key': {'readonly': True},", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "Base 64 encoded value of the secondary read-only key. :vartype", "populated by the server, and will be ignored when sending", "primary read-write key. :vartype primary_master_key: str :ivar secondary_master_key: Base 64", "generated by Microsoft (R) AutoRest Code Generator. # Changes may", "Base 64 encoded value of the primary read-only key. :vartype", "64 encoded value of the secondary read-write key. :vartype secondary_master_key:", "the primary read-only key. 
:vartype primary_readonly_master_key: str :ivar secondary_readonly_master_key: Base", "in the project root for # license information. # #", "secondary read-write key. :vartype secondary_master_key: str \"\"\" _validation = {", "secondary_master_key: Base 64 encoded value of the secondary read-write key.", "reserved. # Licensed under the MIT License. See License.txt in", "ignored when sending a request. :ivar primary_readonly_master_key: Base 64 encoded", ":ivar secondary_readonly_master_key: Base 64 encoded value of the secondary read-only", "are only populated by the server, and will be ignored", "the given database account. Variables are only populated by the", "read-write key. :vartype secondary_master_key: str \"\"\" _validation = { 'primary_readonly_master_key':", "'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'}, 'secondary_master_key': {'key':", "# # Code generated by Microsoft (R) AutoRest Code Generator.", "64 encoded value of the secondary read-only key. :vartype secondary_readonly_master_key:", "'type': 'str'}, 'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'}, 'secondary_master_key': {'key': 'secondaryMasterKey',", "'type': 'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key': {'key': 'primaryMasterKey',", "str \"\"\" _validation = { 'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key': {'readonly':", "Corporation. All rights reserved. # Licensed under the MIT License.", "# Licensed under the MIT License. See License.txt in the", ":ivar primary_master_key: Base 64 encoded value of the primary read-write", "by the server, and will be ignored when sending a", "-------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys", "value of the primary read-write key. :vartype primary_master_key: str :ivar", "# Changes may cause incorrect behavior and will be lost", "of the primary read-write key. :vartype primary_master_key: str :ivar secondary_master_key:", "} _attribute_map = { 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key':", "-> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs) self.primary_master_key = None self.secondary_master_key = None", "primary_readonly_master_key: Base 64 encoded value of the primary read-only key.", "import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys for the given", "# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved.", "encoded value of the secondary read-write key. :vartype secondary_master_key: str", "is # regenerated. # -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class", "secondary_master_key: str \"\"\" _validation = { 'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key':", "if the code is # regenerated. 
# -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3", "of the secondary read-write key. :vartype secondary_master_key: str \"\"\" _validation", "Code generated by Microsoft (R) AutoRest Code Generator. # Changes", "information. # # Code generated by Microsoft (R) AutoRest Code", "coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights", "regenerated. # -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The", "'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key': {'key': 'primaryMasterKey', 'type':", "'type': 'str'}, } def __init__(self, **kwargs) -> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs)", "License. See License.txt in the project root for # license", "'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key': {'readonly': True}, 'secondary_master_key': {'readonly': True}, }", "account. Variables are only populated by the server, and will", "primary read-only key. :vartype primary_readonly_master_key: str :ivar secondary_readonly_master_key: Base 64", "will be lost if the code is # regenerated. #", "of the primary read-only key. :vartype primary_readonly_master_key: str :ivar secondary_readonly_master_key:", "database account. Variables are only populated by the server, and", "lost if the code is # regenerated. # -------------------------------------------------------------------------- from", "primary_readonly_master_key: str :ivar secondary_readonly_master_key: Base 64 encoded value of the", "# -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access", ":ivar secondary_master_key: Base 64 encoded value of the secondary read-write", "the server, and will be ignored when sending a request.", "the code is # regenerated. # -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import", "and will be lost if the code is # regenerated.", "given database account. Variables are only populated by the server,", "_validation = { 'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key':", "str :ivar primary_master_key: Base 64 encoded value of the primary", ":vartype secondary_master_key: str \"\"\" _validation = { 'primary_readonly_master_key': {'readonly': True},", "read-only key. :vartype primary_readonly_master_key: str :ivar secondary_readonly_master_key: Base 64 encoded", "primary_master_key: Base 64 encoded value of the primary read-write key.", "under the MIT License. See License.txt in the project root", "read-only key. 
:vartype secondary_readonly_master_key: str :ivar primary_master_key: Base 64 encoded", "def __init__(self, **kwargs) -> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs) self.primary_master_key = None", "'primary_master_key': {'readonly': True}, 'secondary_master_key': {'readonly': True}, } _attribute_map = {", "cause incorrect behavior and will be lost if the code", "str :ivar secondary_readonly_master_key: Base 64 encoded value of the secondary", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "All rights reserved. # Licensed under the MIT License. See", "for the given database account. Variables are only populated by", "Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect", "from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys for", "{'readonly': True}, 'secondary_readonly_master_key': {'readonly': True}, 'primary_master_key': {'readonly': True}, 'secondary_master_key': {'readonly':", "True}, 'primary_master_key': {'readonly': True}, 'secondary_master_key': {'readonly': True}, } _attribute_map =", "value of the secondary read-write key. :vartype secondary_master_key: str \"\"\"", ":vartype primary_master_key: str :ivar secondary_master_key: Base 64 encoded value of", "read-write key. :vartype primary_master_key: str :ivar secondary_master_key: Base 64 encoded", "'secondary_master_key': {'key': 'secondaryMasterKey', 'type': 'str'}, } def __init__(self, **kwargs) ->", "True}, 'secondary_master_key': {'readonly': True}, } _attribute_map = { 'primary_readonly_master_key': {'key':", "sending a request. :ivar primary_readonly_master_key: Base 64 encoded value of", "value of the secondary read-only key. :vartype secondary_readonly_master_key: str :ivar", "True}, } _attribute_map = { 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'},", ":vartype secondary_readonly_master_key: str :ivar primary_master_key: Base 64 encoded value of", "# regenerated. # -------------------------------------------------------------------------- from .database_account_list_read_only_keys_result_py3 import DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult):", "root for # license information. # # Code generated by", "'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key': {'key':", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "Licensed under the MIT License. See License.txt in the project", "{'readonly': True}, } _attribute_map = { 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type':", "# Code generated by Microsoft (R) AutoRest Code Generator. #", "= { 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey',", "} def __init__(self, **kwargs) -> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs) self.primary_master_key =", "'secondary_readonly_master_key': {'key': 'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'},", "rights reserved. # Licensed under the MIT License. 
See License.txt", "class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys for the given database account.", "{'key': 'secondaryReadonlyMasterKey', 'type': 'str'}, 'primary_master_key': {'key': 'primaryMasterKey', 'type': 'str'}, 'secondary_master_key':", "\"\"\"The access keys for the given database account. Variables are", "# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All", "\"\"\" _validation = { 'primary_readonly_master_key': {'readonly': True}, 'secondary_readonly_master_key': {'readonly': True},", "key. :vartype primary_readonly_master_key: str :ivar secondary_readonly_master_key: Base 64 encoded value", "License.txt in the project root for # license information. #", "# license information. # # Code generated by Microsoft (R)", "secondary_readonly_master_key: str :ivar primary_master_key: Base 64 encoded value of the", "primary_master_key: str :ivar secondary_master_key: Base 64 encoded value of the", "{'key': 'secondaryMasterKey', 'type': 'str'}, } def __init__(self, **kwargs) -> None:", "Code Generator. # Changes may cause incorrect behavior and will", "be lost if the code is # regenerated. # --------------------------------------------------------------------------", "keys for the given database account. Variables are only populated", "Base 64 encoded value of the secondary read-write key. :vartype", "{'readonly': True}, 'secondary_master_key': {'readonly': True}, } _attribute_map = { 'primary_readonly_master_key':", "for # license information. # # Code generated by Microsoft", "DatabaseAccountListReadOnlyKeysResult class DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys for the given database", "(R) AutoRest Code Generator. # Changes may cause incorrect behavior", "will be ignored when sending a request. :ivar primary_readonly_master_key: Base", "secondary read-only key. :vartype secondary_readonly_master_key: str :ivar primary_master_key: Base 64", "and will be ignored when sending a request. :ivar primary_readonly_master_key:", "encoded value of the primary read-write key. :vartype primary_master_key: str", "key. :vartype primary_master_key: str :ivar secondary_master_key: Base 64 encoded value", "64 encoded value of the primary read-only key. :vartype primary_readonly_master_key:", "Base 64 encoded value of the primary read-write key. :vartype", "DatabaseAccountListKeysResult(DatabaseAccountListReadOnlyKeysResult): \"\"\"The access keys for the given database account. Variables", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "a request. :ivar primary_readonly_master_key: Base 64 encoded value of the", "_attribute_map = { 'primary_readonly_master_key': {'key': 'primaryReadonlyMasterKey', 'type': 'str'}, 'secondary_readonly_master_key': {'key':", "'str'}, } def __init__(self, **kwargs) -> None: super(DatabaseAccountListKeysResult, self).__init__(**kwargs) self.primary_master_key" ]
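# A minimal usage sketch (assumed, not part of this generated module). The
# _attribute_map above ties pythonic attribute names to wire-format keys, so
# a "list keys" response body like
#
#     {"primaryMasterKey": "...", "secondaryMasterKey": "...",
#      "primaryReadonlyMasterKey": "...", "secondaryReadonlyMasterKey": "..."}
#
# is deserialized into this model and read back as plain attributes; client
# construction and the operation group follow azure-mgmt-cosmosdb conventions:
#
#     keys = client.database_accounts.list_keys(resource_group_name, account_name)
#     print(keys.primary_master_key)           # read-write key
#     print(keys.primary_readonly_master_key)  # read-only key (inherited)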
from parsel import Selector
import requests, json, re

params = {
    "q": "<NAME>",
    "tbm": "bks",
    "gl": "us",
    "hl": "en"
}

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}

html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)

books_results = []

# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)

for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
    title = book_result.css(".DKV0Md::text").get()
    link = book_result.css(".bHexk a::attr(href)").get()
    displayed_link = book_result.css(".tjvcx::text").get()
    snippet = book_result.css(".cmlJmd span::text").get()
    author = book_result.css(".fl span::text").get()
    author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
    date_published = book_result.css(".fl+ span::text").get()
    preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
    more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()

    books_results.append({
        "title": title,
        "link": link,
        "displayed_link": displayed_link,
        "snippet": snippet,
        "author": author,
        "author_link": author_link,
        "date_published": date_published,
        "preview_link": preview_link,
        "more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
        "thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
    })
<reponame>AustinTSchaffer/DailyProgrammer<filename>Python/Higher-Or-Lower/hol/__init__.py
r"""
Contains classes and methods that can be used when simulating the game
Higher-or-Lower and performing statistical analysis on different games.
"""

from hol import (
    cards,
    constants,
)

from hol._hol import (
    generate_all_games,
    should_pick_higher,
    is_a_winning_game,
    generate_win_statistics,
)
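# Hedged usage sketch: only the re-exported names above are given by the
# module; the call signatures below are assumptions for illustration.
#
#   from hol import generate_all_games, is_a_winning_game
#   games = generate_all_games()                         # enumerate candidate games
#   wins = sum(1 for g in games if is_a_winning_game(g))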
# [h] slide selected glyphs

from mojo.roboFont import CurrentFont, CurrentGlyph, version

from vanilla import *

from hTools2 import hDialog
from hTools2.modules.fontutils import get_full_name, get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected


class slideGlyphsDialog(hDialog):

    '''A dialog to slide the selected glyphs vertically and/or horizontally.

    .. image:: imgs/glyphs/slide.png

    '''

    _moveX = 0
    _moveY = 0
    _xMax = 1000
    _xMin = -1000
    _yMax = 500
    _yMin = -500

    font = None
    font_name = '(no font selected)'

    def __init__(self):
        # window
        self.title = "slide"
        self.button_width = 70
        self.column_1 = 20
        self.column_2 = 240
        self.width = self.column_1 + self.column_2 + self.button_width + self.padding_x*3
        self.height = self.text_height*3 + self.padding_y*4
        self.w = HUDFloatingWindow((self.width, self.height), self.title)
        x = self.padding_x
        y = self.padding_y
        # current font name
        self.w.box = Box(
                    (x, y, self.column_1 + self.column_2, self.text_height))
        self.w.box.text = TextBox(
                    (5, 0, self.column_1 + self.column_2, self.text_height),
                    self.font_name,
                    sizeStyle=self.size_style)
        x += (self.column_2 + self.column_1 + self.padding_x)
        self.w.button_update_font = SquareButton(
                    (x, y, self.button_width, self.text_height),
                    "update",
                    callback=self.update_font_callback,
                    sizeStyle=self.size_style)
        # x slider
        x = self.padding_x
        y += self.text_height + self.padding_y
        self.w.x_label = TextBox(
                    (x, y + 5, self.column_1, self.text_height),
                    "x",
                    sizeStyle=self.size_style)
        x += self.column_1
        self.w.x_slider = Slider(
                    (x, y, self.column_2, self.text_height),
                    value=0,
                    maxValue=self._xMax,
                    minValue=self._xMin,
                    callback=self.slide_callback,
                    sizeStyle=self.size_style)
        x += (self.column_2 + self.padding_x)
        self.w.button_restore_x = SquareButton(
                    (x, y, self.button_width, self.text_height),
                    "reset x",
                    callback=self.restore_x_callback,
                    sizeStyle=self.size_style)
        # y slider
        x = self.padding_x
        y += (self.text_height + self.padding_y)
        self.w.y_label = TextBox(
                    (x, y + 5, self.column_1, self.text_height),
                    "y",
                    sizeStyle=self.size_style)
        x += self.column_1
        self.w.y_slider = Slider(
                    (x, y, self.column_2, self.text_height),
                    value=0,
                    maxValue=self._yMax,
                    minValue=self._yMin,
                    callback=self.slide_callback,
                    sizeStyle=self.size_style)
        x += (self.column_2 + self.padding_x)
        self.w.button_restore_y = SquareButton(
                    (x, y, self.button_width, self.text_height),
                    "reset y",
                    callback=self.restore_y_callback,
                    sizeStyle=self.size_style)
        # open
        self.w.open()
        self.update_font()

    # callbacks

    def restore_x(self):
        self._moveX = 0
        self.w.x_slider.set(self._moveX)

    def restore_y(self):
        self._moveY = 0
        self.w.y_slider.set(self._moveY)

    def restore_x_callback(self, sender):
        self.restore_x()

    def restore_y_callback(self, sender):
        self.restore_y()

    def update_font(self):
        self.font = CurrentFont()
        if self.font is not None:
            self.w.box.text.set(get_full_name(self.font))
            self.set_defaults()
            self.restore_x()
            self.restore_y()
        else:
            print no_font_open

    def set_defaults(self):
        self._xMax = self.font.info.unitsPerEm
        self._yMax = self.font.info.unitsPerEm / 2
        self._xMin = -self._xMax
        self._yMin = -self._yMax

    def update_font_callback(self, sender):
        self.update_font()

    def slide_callback(self, sender):
        xValue = self.w.x_slider.get()
        yValue = self.w.y_slider.get()
        x = self._moveX - xValue
        y = self._moveY - yValue
        self._moveX = xValue
        self._moveY = yValue
        glyph_names = get_glyphs(self.font)
        if len(glyph_names) > 0:
            for glyph_name in glyph_names:
                # RF 2.0
                if version[0] == '2':
                    self.font[glyph_name].moveBy((-x, -y))
                # RF 1.8.X
                else:
                    self.font[glyph_name].move((-x, -y))
        else:
            print no_glyph_selected
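# Hedged usage note (an assumption based on the hDialog pattern above, not
# stated in the source): inside RoboFont the window is opened by simply
# instantiating the dialog, since __init__ ends with self.w.open().
#
#   slideGlyphsDialog()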
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import io
import json
import os
import random
import re
import string
import time
from functools import wraps
from hashlib import sha1

import six

try:
    from secrets import choice
except ImportError:
    from random import choice

string_types = (six.string_types, six.text_type, six.binary_type)
re_type = type(re.compile("regex_test"))


def get_signature(token, timestamp, nonce, *args):
    sign = [token, timestamp, nonce] + list(args)
    sign.sort()
    sign = to_binary(''.join(sign))
    return sha1(sign).hexdigest()


def check_signature(token, timestamp, nonce, signature):
    if not (token and timestamp and nonce and signature):
        return False
    sign = get_signature(token, timestamp, nonce)
    return sign == signature


def check_token(token):
    return re.match('^[A-Za-z0-9]{3,32}$', token)


def cached_property(method):
    prop_name = '_{}'.format(method.__name__)

    @wraps(method)
    def wrapped_func(self, *args, **kwargs):
        if not hasattr(self, prop_name):
            setattr(self, prop_name, method(self, *args, **kwargs))
        return getattr(self, prop_name)

    return property(wrapped_func)


def to_text(value, encoding="utf-8"):
    if isinstance(value, six.text_type):
        return value
    if isinstance(value, six.binary_type):
        return value.decode(encoding)
    return six.text_type(value)


def to_binary(value, encoding="utf-8"):
    if isinstance(value, six.binary_type):
        return value
    if isinstance(value, six.text_type):
        return value.encode(encoding)
    return six.binary_type(value)


def is_string(value):
    return isinstance(value, string_types)


def byte2int(s, index=0):
    """Get the ASCII int value of a character in a string.

    :param s: a string
    :param index: the position of desired character
    :return: ASCII int value
    """
    if six.PY2:
        return ord(s[index])
    return s[index]


def generate_token(length=''):
    if not length:
        length = random.randint(3, 32)
    length = int(length)
    assert 3 <= length <= 32
    letters = string.ascii_letters + string.digits
    return ''.join(choice(letters) for _ in range(length))


def json_loads(s):
    s = to_text(s)
    return json.loads(s)


def json_dumps(d):
    return json.dumps(d)


def pay_sign_dict(appid, pay_sign_key, add_noncestr=True, add_timestamp=True,
                  add_appid=True, **kwargs):
    """Sign payment parameters."""
    assert pay_sign_key, "PAY SIGN KEY IS EMPTY"

    if add_appid:
        kwargs.update({'appid': appid})
    if add_noncestr:
        kwargs.update({'noncestr': generate_token()})
    if add_timestamp:
        kwargs.update({'timestamp': int(time.time())})

    params = kwargs.items()
    _params = [
        (k.lower(), v) for k, v in kwargs.items() if k.lower() != "appid"
    ]
    _params += [('appid', appid), ('appkey', pay_sign_key)]
    _params.sort()

    sign = '&'.join(["%s=%s" % (str(p[0]), str(p[1]))
                     for p in _params]).encode("utf-8")
    sign = sha1(sign).hexdigest()
    sign_type = 'SHA1'

    return dict(params), sign, sign_type


def make_error_page(url):
    with io.open(
        os.path.join(os.path.dirname(__file__), 'contrib/error.html'),
        'r',
        encoding='utf-8'
    ) as error_page:
        return error_page.read().replace('{url}', url)


def is_regex(value):
    return isinstance(value, re_type)
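# Usage sketch for the helpers above (an illustrative addition, not part
# of the original module): the timestamp/nonce values are made up, and in
# a real WeChat callback they arrive as query-string parameters.
if __name__ == '__main__':
    token = generate_token(16)
    timestamp, nonce = '1409735669', '1320562132'
    signature = get_signature(token, timestamp, nonce)
    assert check_signature(token, timestamp, nonce, signature)
    assert not check_signature(token, timestamp, nonce, 'tampered')

    # pay_sign_dict returns (params, sign, sign_type); 'body' here is an
    # arbitrary extra parameter folded into the SHA1 signature.
    params, sign, sign_type = pay_sign_dict('wx-example-appid', 'example-key',
                                            body='test order')
    assert sign_type == 'SHA1' and len(sign) == 40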
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import platform as _platform
import sys as _sys

from tensorflow.python import autograph
from tensorflow.python.training.experimental import loss_scaling_gradient_tape

# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import random_grad
from tensorflow.python.ops import rnn_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad

# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import *  # pylint: disable=redefined-builtin
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple  # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.eager import wrap_function
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.batch_ops import *
from tensorflow.python.ops.critical_section_ops import *
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import *  # pylint: disable=redefined-builtin
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.proto_ops import *
from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch
from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sort_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.stateless_random_ops import *
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import *  # pylint: disable=redefined-builtin
from tensorflow.python.ops.variables import *
from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map

# pylint: disable=g-import-not-at-top
if _platform.system() == "Windows":
  from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt
else:
  from tensorflow.python.compiler.tensorrt import trt_convert as trt
# pylint: enable=g-import-not-at-top
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order

# These modules were imported to set up RaggedTensor operators and dispatchers:
del _ragged_dispatch, _ragged_operators
# # Licensed under the Apache License, Version", "import * from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops import *", "import * from tensorflow.python.ops.proto_ops import * from tensorflow.python.ops.ragged import ragged_dispatch", "from tensorflow.python.ops.script_ops import py_func from tensorflow.python.ops.session_ops import * from tensorflow.python.ops.sort_ops", "tuple # pylint: disable=redefined-builtin # pylint: enable=redefined-builtin from tensorflow.python.eager import", "pylint: disable=g-bad-import-order # Imports the following modules so that @RegisterGradient", "tensorflow.python.ops import state_grad from tensorflow.python.ops import tensor_array_grad # go/tf-wildcard-import #", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "as trt # pylint: enable=g-import-not-at-top # pylint: enable=wildcard-import # pylint:", "trt # pylint: enable=g-import-not-at-top # pylint: enable=wildcard-import # pylint: enable=g-bad-import-order", "from tensorflow.python.ops.state_ops import count_up_to from tensorflow.python.ops.state_ops import scatter_add from tensorflow.python.ops.state_ops", "__future__ import division from __future__ import print_function import platform as", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# pylint: disable=wildcard-import from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin", "from tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops import * from tensorflow.python.ops.io_ops", "permissions and # limitations under the License. # ============================================================================== #", "tensorflow.python.ops.numerics import * from tensorflow.python.ops.parsing_ops import * from tensorflow.python.ops.partitioned_variables import", "disable=g-import-not-at-top if _platform.system() == \"Windows\": from tensorflow.python.compiler.tensorrt import trt_convert_windows as", "* from tensorflow.python.ops.string_ops import * from tensorflow.python.ops.template import * from", "Assert from tensorflow.python.ops.control_flow_ops import case from tensorflow.python.ops.control_flow_ops import cond from", "* from tensorflow.python.ops.critical_section_ops import * from tensorflow.python.ops.data_flow_ops import * from", "tensorflow.python.ops.init_ops import * from tensorflow.python.ops.io_ops import * from tensorflow.python.ops.linalg_ops import", "enable=g-import-not-at-top # pylint: enable=wildcard-import # pylint: enable=g-bad-import-order # These modules", "(the \"License\"); # you may not use this file except", "import * from tensorflow.python.ops.sort_ops import * from tensorflow.python.ops.sparse_ops import *", "# you may not use this file except in compliance", "import confusion_matrix from tensorflow.python.ops.control_flow_ops import Assert from tensorflow.python.ops.control_flow_ops import case", "go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.array_ops import * # pylint:", "scatter_max from tensorflow.python.ops.state_ops import scatter_update from tensorflow.python.ops.state_ops import scatter_nd_add from", "tensorflow.python.ops.state_ops import scatter_div from tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops import", "import * from tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops import *", 
"tensorflow.python.ops.state_ops import scatter_sub from tensorflow.python.ops.state_ops import scatter_min from tensorflow.python.ops.state_ops import", "tensorflow.python.ops.stateless_random_ops import * from tensorflow.python.ops.string_ops import * from tensorflow.python.ops.template import", "Copyright 2015 The TensorFlow Authors. All Rights Reserved. # #", "okay with exposing the module. from tensorflow.python.ops.confusion_matrix import confusion_matrix from", "from __future__ import division from __future__ import print_function import platform", "Switch to import * once we're okay with exposing the", "from tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops import * from tensorflow.python.ops.math_ops", "import scatter_nd_add from tensorflow.python.ops.state_ops import scatter_nd_sub # TODO(simister): Re-enable once", "loss_scaling_gradient_tape # pylint: disable=g-bad-import-order # Imports the following modules so", "# # Unless required by applicable law or agreed to", "import assign from tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops import assign_sub", "we're okay with exposing the module. from tensorflow.python.ops.confusion_matrix import confusion_matrix", "from tensorflow.python.ops.gradients import * from tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops", "import * # pylint: disable=redefined-builtin from tensorflow.python.ops.variables import * from", "assign from tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops import assign_sub from", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# pylint: disable=redefined-builtin from tensorflow.python.ops.variables import * from tensorflow.python.ops.parallel_for.control_flow_ops import", "\"\"\"Import names of Tensor Flow standard Ops.\"\"\" from __future__ import", "Version 2.0 (the \"License\"); # you may not use this", "from tensorflow.python.ops.state_ops import scatter_div from tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops", "* from tensorflow.python.ops.data_flow_ops import * from tensorflow.python.ops.functional_ops import * from", "pylint: disable=g-import-not-at-top if _platform.system() == \"Windows\": from tensorflow.python.compiler.tensorrt import trt_convert_windows", "* from tensorflow.python.ops.parsing_ops import * from tensorflow.python.ops.partitioned_variables import * from", "import * from tensorflow.python.ops.tensor_array_ops import * from tensorflow.python.ops.variable_scope import *", "from tensorflow.python.ops.state_ops import scatter_add from tensorflow.python.ops.state_ops import scatter_div from tensorflow.python.ops.state_ops", "* # pylint: disable=redefined-builtin from tensorflow.python.ops.variables import * from tensorflow.python.ops.parallel_for.control_flow_ops", "__future__ import absolute_import from __future__ import division from __future__ import", "# TODO(vrv): Switch to import * once we're okay with", "implied. 
# See the License for the specific language governing", "* from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops import * from", "size increase due to scatter_nd # ops is under control.", "from tensorflow.python.ops.array_ops import * # pylint: disable=redefined-builtin from tensorflow.python.ops.check_ops import", "under the Apache License, Version 2.0 (the \"License\"); # you", "import trt_convert_windows as trt else: from tensorflow.python.compiler.tensorrt import trt_convert as", "from tensorflow.python.ops.variable_scope import * # pylint: disable=redefined-builtin from tensorflow.python.ops.variables import", "tensorflow.python.ops.control_flow_ops import while_loop from tensorflow.python.ops.batch_ops import * from tensorflow.python.ops.critical_section_ops import", "tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops import assign_sub from tensorflow.python.ops.state_ops import", "from tensorflow.python.ops.lookup_ops import initialize_all_tables from tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops", "by applicable law or agreed to in writing, software #", "from tensorflow.python.ops.state_ops import scatter_update from tensorflow.python.ops.state_ops import scatter_nd_add from tensorflow.python.ops.state_ops", "* from tensorflow.python.ops.io_ops import * from tensorflow.python.ops.linalg_ops import * from", "from tensorflow.python.ops.state_ops import scatter_max from tensorflow.python.ops.state_ops import scatter_update from tensorflow.python.ops.state_ops", "from tensorflow.python.ops.state_ops import scatter_sub from tensorflow.python.ops.state_ops import scatter_min from tensorflow.python.ops.state_ops", "tensorflow.python.ops import math_grad from tensorflow.python.ops import random_grad from tensorflow.python.ops import", "from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops import * from tensorflow.python.ops.ragged", "assign_add from tensorflow.python.ops.state_ops import assign_sub from tensorflow.python.ops.state_ops import count_up_to from", "scatter_nd # ops is under control. 
# from tensorflow.python.ops.state_ops import", "* from tensorflow.python.ops.proto_ops import * from tensorflow.python.ops.ragged import ragged_dispatch as", "wrap_function from tensorflow.python.ops.control_flow_ops import while_loop from tensorflow.python.ops.batch_ops import * from", "* from tensorflow.python.ops.template import * from tensorflow.python.ops.tensor_array_ops import * from", "# pylint: disable=g-import-not-at-top if _platform.system() == \"Windows\": from tensorflow.python.compiler.tensorrt import", "import ragged_dispatch as _ragged_dispatch from tensorflow.python.ops.ragged import ragged_operators as _ragged_operators", "from tensorflow.python.ops.parsing_ops import * from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops", "from tensorflow.python.ops.state_ops import scatter_nd_sub # TODO(simister): Re-enable once binary size", "tensorflow.python.ops.critical_section_ops import * from tensorflow.python.ops.data_flow_ops import * from tensorflow.python.ops.functional_ops import", "from tensorflow.python.ops.manip_ops import * from tensorflow.python.ops.math_ops import * # pylint:", "tensorflow.python.ops.sort_ops import * from tensorflow.python.ops.sparse_ops import * from tensorflow.python.ops.state_ops import", "tensorflow.python.ops import rnn_grad from tensorflow.python.ops import sparse_grad from tensorflow.python.ops import", "from tensorflow.python.ops import rnn_grad from tensorflow.python.ops import sparse_grad from tensorflow.python.ops", "limitations under the License. # ============================================================================== # pylint: disable=unused-import \"\"\"Import", "tensorflow.python.ops.control_flow_ops import case from tensorflow.python.ops.control_flow_ops import cond from tensorflow.python.ops.control_flow_ops import", "* # pylint: disable=redefined-builtin from tensorflow.python.ops.check_ops import * from tensorflow.python.ops.clip_ops", "cond from tensorflow.python.ops.control_flow_ops import group from tensorflow.python.ops.control_flow_ops import no_op from", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "Unless required by applicable law or agreed to in writing,", "tensorflow.python.ops.state_ops import scatter_nd_update from tensorflow.python.ops.stateless_random_ops import * from tensorflow.python.ops.string_ops import", "== \"Windows\": from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt else: from", "* from tensorflow.python.ops.gradients import * from tensorflow.python.ops.histogram_ops import * from", "tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin from tensorflow.python.ops.numerics import *", "# These modules were imported to set up RaggedTensor operators", "of Tensor Flow standard Ops.\"\"\" from __future__ import absolute_import from", "TODO(simister): Re-enable once binary size increase due to scatter_nd #", "# limitations under the License. 
# ============================================================================== # pylint: disable=unused-import", "the specific language governing permissions and # limitations under the", "# pylint: enable=g-bad-import-order # These modules were imported to set", "import * from tensorflow.python.ops.gradients import * from tensorflow.python.ops.histogram_ops import *", "applicable law or agreed to in writing, software # distributed", "import loss_scaling_gradient_tape # pylint: disable=g-bad-import-order # Imports the following modules", "from tensorflow.python.ops import array_grad from tensorflow.python.ops import cudnn_rnn_grad from tensorflow.python.ops", "scatter_nd_sub # TODO(simister): Re-enable once binary size increase due to", "platform as _platform import sys as _sys from tensorflow.python import", "vectorized_map # pylint: disable=g-import-not-at-top if _platform.system() == \"Windows\": from tensorflow.python.compiler.tensorrt", "tensorflow.python.ops.state_ops import assign_sub from tensorflow.python.ops.state_ops import count_up_to from tensorflow.python.ops.state_ops import", "import platform as _platform import sys as _sys from tensorflow.python", "import initialize_all_tables from tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops import *", "scatter_nd_div from tensorflow.python.ops.state_ops import scatter_nd_update from tensorflow.python.ops.stateless_random_ops import * from", "tensorflow.python.ops.state_ops import scatter_min from tensorflow.python.ops.state_ops import scatter_max from tensorflow.python.ops.state_ops import", "in writing, software # distributed under the License is distributed", "manip_grad from tensorflow.python.ops import math_grad from tensorflow.python.ops import random_grad from", "from tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops import scatter_sub from tensorflow.python.ops.state_ops", "tensorflow.python.ops.parsing_ops import * from tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops import", "scatter_nd_mul # from tensorflow.python.ops.state_ops import scatter_nd_div from tensorflow.python.ops.state_ops import scatter_nd_update", "tensorflow.python.ops.sparse_ops import * from tensorflow.python.ops.state_ops import assign from tensorflow.python.ops.state_ops import", "tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops import * from tensorflow.python.ops.io_ops import", "confusion_matrix from tensorflow.python.ops.control_flow_ops import Assert from tensorflow.python.ops.control_flow_ops import case from", "once binary size increase due to scatter_nd # ops is", "* from tensorflow.python.ops.tensor_array_ops import * from tensorflow.python.ops.variable_scope import * #", "_platform import sys as _sys from tensorflow.python import autograph from", "tensorflow.python.ops.proto_ops import * from tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch from", "math_grad from tensorflow.python.ops import random_grad from tensorflow.python.ops import rnn_grad from", "from tensorflow.python.ops.control_flow_ops import case from tensorflow.python.ops.control_flow_ops import cond from tensorflow.python.ops.control_flow_ops", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "tensorflow.python.ops import array_grad from tensorflow.python.ops import cudnn_rnn_grad from tensorflow.python.ops import", "import scatter_nd_sub # TODO(simister): Re-enable once binary size increase 
due", "increase due to scatter_nd # ops is under control. #", "tensorflow.python.ops.string_ops import * from tensorflow.python.ops.template import * from tensorflow.python.ops.tensor_array_ops import", "License, Version 2.0 (the \"License\"); # you may not use", "tensorflow.python.ops.control_flow_ops import Assert from tensorflow.python.ops.control_flow_ops import case from tensorflow.python.ops.control_flow_ops import", "# You may obtain a copy of the License at", "disable=redefined-builtin from tensorflow.python.ops.numerics import * from tensorflow.python.ops.parsing_ops import * from", "tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops import * from tensorflow.python.ops.math_ops import", "names of Tensor Flow standard Ops.\"\"\" from __future__ import absolute_import", "state_grad from tensorflow.python.ops import tensor_array_grad # go/tf-wildcard-import # pylint: disable=wildcard-import", "the License. # ============================================================================== # pylint: disable=unused-import \"\"\"Import names of", "control. # from tensorflow.python.ops.state_ops import scatter_nd_mul # from tensorflow.python.ops.state_ops import", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "from tensorflow.python.ops.state_ops import assign from tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops", "Authors. All Rights Reserved. # # Licensed under the Apache", "from __future__ import print_function import platform as _platform import sys", "executed. from tensorflow.python.ops import array_grad from tensorflow.python.ops import cudnn_rnn_grad from", "tensorflow.python.ops.partitioned_variables import * from tensorflow.python.ops.proto_ops import * from tensorflow.python.ops.ragged import", "tensorflow.python.ops.ragged import ragged_dispatch as _ragged_dispatch from tensorflow.python.ops.ragged import ragged_operators as", "* from tensorflow.python.ops.init_ops import * from tensorflow.python.ops.io_ops import * from", "scatter_div from tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops import scatter_sub from", "tensorflow.python.ops.state_ops import assign from tensorflow.python.ops.state_ops import assign_add from tensorflow.python.ops.state_ops import", "as _sys from tensorflow.python import autograph from tensorflow.python.training.experimental import loss_scaling_gradient_tape", "from tensorflow.python.ops.state_ops import scatter_nd_add from tensorflow.python.ops.state_ops import scatter_nd_sub # TODO(simister):", "Imports the following modules so that @RegisterGradient get executed. from", "the License for the specific language governing permissions and #", "tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map # pylint: disable=g-import-not-at-top if _platform.system() == \"Windows\":", "Apache License, Version 2.0 (the \"License\"); # you may not", "from tensorflow.python.ops.string_ops import * from tensorflow.python.ops.template import * from tensorflow.python.ops.tensor_array_ops", "either express or implied. 
# See the License for the", "import * from tensorflow.python.ops.logging_ops import Print from tensorflow.python.ops.logging_ops import get_summary_op", "tensorflow.python import autograph from tensorflow.python.training.experimental import loss_scaling_gradient_tape # pylint: disable=g-bad-import-order", "# pylint: enable=g-import-not-at-top # pylint: enable=wildcard-import # pylint: enable=g-bad-import-order #", "trt_convert_windows as trt else: from tensorflow.python.compiler.tensorrt import trt_convert as trt", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "from tensorflow.python.ops import state_grad from tensorflow.python.ops import tensor_array_grad # go/tf-wildcard-import", "import * from tensorflow.python.ops.functional_ops import * from tensorflow.python.ops.gradients import *", "pylint: disable=redefined-builtin from tensorflow.python.ops.numerics import * from tensorflow.python.ops.parsing_ops import *", "from tensorflow.python.ops import sparse_grad from tensorflow.python.ops import state_grad from tensorflow.python.ops", "import * # pylint: disable=redefined-builtin from tensorflow.python.ops.numerics import * from", "from tensorflow.python.ops.sort_ops import * from tensorflow.python.ops.sparse_ops import * from tensorflow.python.ops.state_ops", "* from tensorflow.python.ops.sparse_ops import * from tensorflow.python.ops.state_ops import assign from", "from tensorflow.python.ops.template import * from tensorflow.python.ops.tensor_array_ops import * from tensorflow.python.ops.variable_scope", "from tensorflow.python.ops.control_flow_ops import no_op from tensorflow.python.ops.control_flow_ops import tuple # pylint:", "pylint: enable=g-bad-import-order # These modules were imported to set up", "import data_flow_grad from tensorflow.python.ops import manip_grad from tensorflow.python.ops import math_grad", "tensorflow.python.ops.variables import * from tensorflow.python.ops.parallel_for.control_flow_ops import vectorized_map # pylint: disable=g-import-not-at-top", "import state_grad from tensorflow.python.ops import tensor_array_grad # go/tf-wildcard-import # pylint:", "tensorflow.python.ops.lookup_ops import initialize_all_tables from tensorflow.python.ops.lookup_ops import tables_initializer from tensorflow.python.ops.manip_ops import", "from tensorflow.python.ops.linalg_ops import * from tensorflow.python.ops.logging_ops import Print from tensorflow.python.ops.logging_ops", "import * from tensorflow.python.ops.state_ops import assign from tensorflow.python.ops.state_ops import assign_add", "tensorflow.python.ops.ragged import ragged_operators as _ragged_operators from tensorflow.python.ops.random_ops import * from", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "__future__ import print_function import platform as _platform import sys as", "tensorflow.python.ops.gradients import * from tensorflow.python.ops.histogram_ops import * from tensorflow.python.ops.init_ops import", "* from tensorflow.python.ops.sort_ops import * from tensorflow.python.ops.sparse_ops import * from", "from tensorflow.python import autograph from tensorflow.python.training.experimental import loss_scaling_gradient_tape # pylint:", "# pylint: disable=redefined-builtin from tensorflow.python.ops.numerics import * from tensorflow.python.ops.parsing_ops import", "_platform.system() == \"Windows\": from tensorflow.python.compiler.tensorrt import trt_convert_windows as trt else:", "from tensorflow.python.ops.state_ops import 
scatter_nd_update from tensorflow.python.ops.stateless_random_ops import * from tensorflow.python.ops.string_ops", "absolute_import from __future__ import division from __future__ import print_function import", "import tensor_array_grad # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.array_ops import", "License. # ============================================================================== # pylint: disable=unused-import \"\"\"Import names of Tensor", "from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin # pylint: enable=redefined-builtin", "\"License\"); # you may not use this file except in", "tensorflow.python.ops.state_ops import scatter_nd_add from tensorflow.python.ops.state_ops import scatter_nd_sub # TODO(simister): Re-enable", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "get executed. from tensorflow.python.ops import array_grad from tensorflow.python.ops import cudnn_rnn_grad", "disable=unused-import \"\"\"Import names of Tensor Flow standard Ops.\"\"\" from __future__", "tensorflow.python.ops.state_ops import scatter_mul from tensorflow.python.ops.state_ops import scatter_sub from tensorflow.python.ops.state_ops import", "import scatter_min from tensorflow.python.ops.state_ops import scatter_max from tensorflow.python.ops.state_ops import scatter_update", "tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: enable=g-import-not-at-top # pylint:", "# distributed under the License is distributed on an \"AS", "else: from tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: enable=g-import-not-at-top", "import * from tensorflow.python.ops.math_ops import * # pylint: disable=redefined-builtin from", "scatter_add from tensorflow.python.ops.state_ops import scatter_div from tensorflow.python.ops.state_ops import scatter_mul from", "from tensorflow.python.ops.batch_ops import * from tensorflow.python.ops.critical_section_ops import * from tensorflow.python.ops.data_flow_ops", "# Unless required by applicable law or agreed to in", "tensorflow.python.eager import wrap_function from tensorflow.python.ops.control_flow_ops import while_loop from tensorflow.python.ops.batch_ops import", "under the License. # ============================================================================== # pylint: disable=unused-import \"\"\"Import names", "* from tensorflow.python.ops.clip_ops import * from tensorflow.python.ops.special_math_ops import * #", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "from tensorflow.python.ops.logging_ops import Print from tensorflow.python.ops.logging_ops import get_summary_op from tensorflow.python.ops.logging_ops", "You may obtain a copy of the License at #", "from tensorflow.python.ops import data_flow_grad from tensorflow.python.ops import manip_grad from tensorflow.python.ops", "disable=g-bad-import-order # Imports the following modules so that @RegisterGradient get", "# pylint: disable=redefined-builtin from tensorflow.python.ops.check_ops import * from tensorflow.python.ops.clip_ops import", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved. 
#", "# pylint: disable=redefined-builtin # pylint: enable=redefined-builtin from tensorflow.python.eager import wrap_function", "tensorflow.python.ops.logging_ops import Print from tensorflow.python.ops.logging_ops import get_summary_op from tensorflow.python.ops.logging_ops import", "the Apache License, Version 2.0 (the \"License\"); # you may", "modules were imported to set up RaggedTensor operators and dispatchers:", "_ragged_operators from tensorflow.python.ops.random_ops import * from tensorflow.python.ops.script_ops import py_func from", "tables_initializer from tensorflow.python.ops.manip_ops import * from tensorflow.python.ops.math_ops import * #" ]
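# --- Illustrative sketch (an addition, not part of standard_ops itself) ---
# The *_grad modules above are imported purely for their side effect: each
# one runs its @ops.RegisterGradient decorators at import time, which
# populates the global gradient registry. The same mechanism in miniature,
# for a hypothetical op type named "MySquare":

from tensorflow.python.framework import ops


@ops.RegisterGradient("MySquare")
def _my_square_grad(op, grad):
  # d/dx x**2 == 2*x, so scale the incoming gradient by twice the input.
  x = op.inputs[0]
  return grad * 2.0 * x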
[ "disable=global-statement global START_TIME db = tango.Database() elapsed = time.time() -", "num_devices)) print('>> Time taken to start devices: {:.4f} s ({:.4f}", "print('- Register devices: {:.4f} s ({:.4f} s/device)' .format(elapsed, elapsed /", "exported_devices = list(db.get_device_exported('test/*')) print('- No. running devices: {}'.format(len(exported_devices))) def main(args=None,", "\"\"\" # pylint: disable=global-statement global START_TIME db = tango.Database() elapsed", "list_devices() print('* Starting server ...') sys.argv = ['TestDeviceServer', '1', '-v4']", "running devices: {}'.format(len(exported_devices))) def main(args=None, **kwargs): \"\"\"Run (start) the device", "tango.Database() elapsed = time.time() - START_TIME list_devices() exported_devices = list(db.get_device_exported('test/*'))", "PARSER.parse_args() delete_server() time.sleep(0.5) list_devices() print('* Registering {} devices'.format(ARGS.num_devices)) register(ARGS.num_devices) list_devices()", "utf-8 -*- \"\"\"Test Tango device server for use with scaling", "server = 'TestDeviceServer/1' server_list = list(db.get_server_list(server)) if server in server_list:", "Register devices: {:.4f} s ({:.4f} s/device)' .format(elapsed, elapsed / num_devices))", "'test/test_device/{:05d}'.format(device_id) db.add_device(device_info) elapsed = time.time() - start_time file = open('results.txt',", "print('* Registering {} devices'.format(ARGS.num_devices)) register(ARGS.num_devices) list_devices() print('* Starting server ...')", "metavar='N', type=int, default=1, nargs='?', help='Number of devices to start.') ARGS", "import sys import time import argparse import tango from tango.server", "taken to start devices: {:.4f} s ({:.4f} s/dev)' .format(elapsed, elapsed", ".format(elapsed, elapsed / num_devices)) def delete_server(): \"\"\"Delete the TestDeviceServer from", "server_instance = 'TestDeviceServer/1' device_class = 'TestDevice' devices = list(db.get_device_name(server_instance, device_class))", "= argparse.ArgumentParser(description='Device registration time.') PARSER.add_argument('num_devices', metavar='N', type=int, default=1, nargs='?', help='Number", "\"\"\"Report server start up times. 
This callback is executed post", "db.\"\"\" db = tango.Database() db.set_timeout_millis(50000) server = 'TestDeviceServer/1' server_list =", "coding: utf-8 -*- \"\"\"Test Tango device server for use with", "__name__ == '__main__': PARSER = argparse.ArgumentParser(description='Device registration time.') PARSER.add_argument('num_devices', metavar='N',", "tango.DbDevInfo() device_info.server = 'TestDeviceServer/1' # pylint: disable=protected-access device_info._class = 'TestDevice'", "the tango db.\"\"\" db = tango.Database() device_info = tango.DbDevInfo() device_info.server", "START_TIME list_devices() exported_devices = list(db.get_device_exported('test/*')) num_devices = len(exported_devices) file =", "({:.4f} s/device)' .format(elapsed, elapsed / num_devices)) def list_devices(): \"\"\"List tango", "devices: {:.4f} s ({:.4f} s/device)' .format(elapsed, elapsed / num_devices)) def", "\"\"\"List tango devices associated with the TestDeviceServer.\"\"\" db = tango.Database()", "register(ARGS.num_devices) list_devices() print('* Starting server ...') sys.argv = ['TestDeviceServer', '1',", "the TestDeviceServer.\"\"\" db = tango.Database() server_instance = 'TestDeviceServer/1' device_class =", "db.add_device(device_info) elapsed = time.time() - start_time file = open('results.txt', 'a')", "**kwargs): \"\"\"Run (start) the device server.\"\"\" run([TestDevice], verbose=True, msg_stream=sys.stdout, post_init_callback=init_callback,", "tests.\"\"\" import sys import time import argparse import tango from", "def register(num_devices): \"\"\"Register devices in the tango db.\"\"\" db =", "**kwargs) if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Device registration time.')", "{:.4f} s ({:.4f} s/dev)' .format(elapsed, elapsed / num_devices)) def delete_server():", "{:.4f} s'.format(time.time() - start_time)) def register(num_devices): \"\"\"Register devices in the", "list(db.get_device_name(server_instance, device_class)) print('- No. registered devices: {}'.format(len(devices))) exported_devices = list(db.get_device_exported('test/*'))", "db.set_timeout_millis(50000) server = 'TestDeviceServer/1' server_list = list(db.get_server_list(server)) if server in", "device_info._class = 'TestDevice' start_time = time.time() for device_id in range(num_devices):", "{}'.format(len(exported_devices))) def main(args=None, **kwargs): \"\"\"Run (start) the device server.\"\"\" run([TestDevice],", "run([TestDevice], verbose=True, msg_stream=sys.stdout, post_init_callback=init_callback, raises=False, args=args, **kwargs) if __name__ ==", "START_TIME db = tango.Database() elapsed = time.time() - START_TIME list_devices()", "tango.Database() server_instance = 'TestDeviceServer/1' device_class = 'TestDevice' devices = list(db.get_device_name(server_instance,", "delete_server(): \"\"\"Delete the TestDeviceServer from the tango db.\"\"\" db =", "start up times. 
This callback is executed post server initialisation.", "open('results.txt', 'a') file.write(',{},{}\\n'.format(elapsed, elapsed / num_devices)) print('>> Time taken to", "main(args=None, **kwargs): \"\"\"Run (start) the device server.\"\"\" run([TestDevice], verbose=True, msg_stream=sys.stdout,", "TestDevice import TestDevice def init_callback(): \"\"\"Report server start up times.", "'a') file.write(',{},{}\\n'.format(elapsed, elapsed / num_devices)) print('>> Time taken to start", "file = open('results.txt', 'a') file.write(',{},{}\\n'.format(elapsed, elapsed / num_devices)) print('>> Time", "= time.time() - START_TIME list_devices() exported_devices = list(db.get_device_exported('test/*')) num_devices =", "tango db.\"\"\" db = tango.Database() device_info = tango.DbDevInfo() device_info.server =", "start_time = time.time() db.delete_server('TestDeviceServer/1') print('- Delete server: {:.4f} s'.format(time.time() -", "tango.Database() device_info = tango.DbDevInfo() device_info.server = 'TestDeviceServer/1' # pylint: disable=protected-access", "for device_id in range(num_devices): device_info.name = 'test/test_device/{:05d}'.format(device_id) db.add_device(device_info) elapsed =", "list_devices(): \"\"\"List tango devices associated with the TestDeviceServer.\"\"\" db =", "PARSER = argparse.ArgumentParser(description='Device registration time.') PARSER.add_argument('num_devices', metavar='N', type=int, default=1, nargs='?',", "argparse import tango from tango.server import run from TestDevice import", "callback is executed post server initialisation. \"\"\" # pylint: disable=global-statement", "TestDeviceServer from the tango db.\"\"\" db = tango.Database() db.set_timeout_millis(50000) server", "range(num_devices): device_info.name = 'test/test_device/{:05d}'.format(device_id) db.add_device(device_info) elapsed = time.time() - start_time", "= list(db.get_device_exported('test/*')) num_devices = len(exported_devices) file = open('results.txt', 'a') file.write(',{},{}\\n'.format(elapsed,", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\"Test Tango device", "elapsed/num_devices)) print('- Register devices: {:.4f} s ({:.4f} s/device)' .format(elapsed, elapsed", "devices: {}'.format(len(exported_devices))) def main(args=None, **kwargs): \"\"\"Run (start) the device server.\"\"\"", "\"\"\"Run (start) the device server.\"\"\" run([TestDevice], verbose=True, msg_stream=sys.stdout, post_init_callback=init_callback, raises=False,", "default=1, nargs='?', help='Number of devices to start.') ARGS = PARSER.parse_args()", "\"\"\"Register devices in the tango db.\"\"\" db = tango.Database() device_info", "time.') PARSER.add_argument('num_devices', metavar='N', type=int, default=1, nargs='?', help='Number of devices to", "-*- coding: utf-8 -*- \"\"\"Test Tango device server for use", "time.time() - START_TIME list_devices() exported_devices = list(db.get_device_exported('test/*')) num_devices = len(exported_devices)", "= tango.Database() db.set_timeout_millis(50000) server = 'TestDeviceServer/1' server_list = list(db.get_server_list(server)) if", "exported_devices = list(db.get_device_exported('test/*')) num_devices = len(exported_devices) file = open('results.txt', 'a')", "for use with scaling tests.\"\"\" import sys import time import", "server ...') sys.argv = ['TestDeviceServer', '1', '-v4'] START_TIME = time.time()", "device_class = 'TestDevice' devices = list(db.get_device_name(server_instance, device_class)) print('- No. 
registered", "start devices: {:.4f} s ({:.4f} s/dev)' .format(elapsed, elapsed / num_devices))", "server.\"\"\" run([TestDevice], verbose=True, msg_stream=sys.stdout, post_init_callback=init_callback, raises=False, args=args, **kwargs) if __name__", "This callback is executed post server initialisation. \"\"\" # pylint:", "nargs='?', help='Number of devices to start.') ARGS = PARSER.parse_args() delete_server()", "db = tango.Database() device_info = tango.DbDevInfo() device_info.server = 'TestDeviceServer/1' #", "devices = list(db.get_device_name(server_instance, device_class)) print('- No. registered devices: {}'.format(len(devices))) exported_devices", "msg_stream=sys.stdout, post_init_callback=init_callback, raises=False, args=args, **kwargs) if __name__ == '__main__': PARSER", "= tango.Database() elapsed = time.time() - START_TIME list_devices() exported_devices =", "db = tango.Database() db.set_timeout_millis(50000) server = 'TestDeviceServer/1' server_list = list(db.get_server_list(server))", "to start.') ARGS = PARSER.parse_args() delete_server() time.sleep(0.5) list_devices() print('* Registering", "registered devices: {}'.format(len(devices))) exported_devices = list(db.get_device_exported('test/*')) print('- No. running devices:", "devices: {:.4f} s ({:.4f} s/dev)' .format(elapsed, elapsed / num_devices)) def", "'a') file.write('{},{},{}'.format(num_devices, elapsed, elapsed/num_devices)) print('- Register devices: {:.4f} s ({:.4f}", "s/device)' .format(elapsed, elapsed / num_devices)) def list_devices(): \"\"\"List tango devices", "is executed post server initialisation. \"\"\" # pylint: disable=global-statement global", "def main(args=None, **kwargs): \"\"\"Run (start) the device server.\"\"\" run([TestDevice], verbose=True,", "if __name__ == '__main__': PARSER = argparse.ArgumentParser(description='Device registration time.') PARSER.add_argument('num_devices',", "open('results.txt', 'a') file.write('{},{},{}'.format(num_devices, elapsed, elapsed/num_devices)) print('- Register devices: {:.4f} s", "if server in server_list: start_time = time.time() db.delete_server('TestDeviceServer/1') print('- Delete", "len(exported_devices) file = open('results.txt', 'a') file.write(',{},{}\\n'.format(elapsed, elapsed / num_devices)) print('>>", "the tango db.\"\"\" db = tango.Database() db.set_timeout_millis(50000) server = 'TestDeviceServer/1'", "TestDevice def init_callback(): \"\"\"Report server start up times. This callback", "= open('results.txt', 'a') file.write(',{},{}\\n'.format(elapsed, elapsed / num_devices)) print('>> Time taken", "server_list: start_time = time.time() db.delete_server('TestDeviceServer/1') print('- Delete server: {:.4f} s'.format(time.time()", "server for use with scaling tests.\"\"\" import sys import time", "s'.format(time.time() - start_time)) def register(num_devices): \"\"\"Register devices in the tango", "print('- No. 
running devices: {}'.format(len(exported_devices))) def main(args=None, **kwargs): \"\"\"Run (start)", "import run from TestDevice import TestDevice def init_callback(): \"\"\"Report server", "= time.time() db.delete_server('TestDeviceServer/1') print('- Delete server: {:.4f} s'.format(time.time() - start_time))", "time.sleep(0.5) list_devices() print('* Registering {} devices'.format(ARGS.num_devices)) register(ARGS.num_devices) list_devices() print('* Starting", "= open('results.txt', 'a') file.write('{},{},{}'.format(num_devices, elapsed, elapsed/num_devices)) print('- Register devices: {:.4f}", "= list(db.get_server_list(server)) if server in server_list: start_time = time.time() db.delete_server('TestDeviceServer/1')", "tango db.\"\"\" db = tango.Database() db.set_timeout_millis(50000) server = 'TestDeviceServer/1' server_list", "db.delete_server('TestDeviceServer/1') print('- Delete server: {:.4f} s'.format(time.time() - start_time)) def register(num_devices):", "server initialisation. \"\"\" # pylint: disable=global-statement global START_TIME db =", "python3 # -*- coding: utf-8 -*- \"\"\"Test Tango device server", "def init_callback(): \"\"\"Report server start up times. This callback is", "db.\"\"\" db = tango.Database() device_info = tango.DbDevInfo() device_info.server = 'TestDeviceServer/1'", "start_time = time.time() for device_id in range(num_devices): device_info.name = 'test/test_device/{:05d}'.format(device_id)", "= PARSER.parse_args() delete_server() time.sleep(0.5) list_devices() print('* Registering {} devices'.format(ARGS.num_devices)) register(ARGS.num_devices)", "devices associated with the TestDeviceServer.\"\"\" db = tango.Database() server_instance =", "= time.time() for device_id in range(num_devices): device_info.name = 'test/test_device/{:05d}'.format(device_id) db.add_device(device_info)", "device server for use with scaling tests.\"\"\" import sys import", "import tango from tango.server import run from TestDevice import TestDevice", "list(db.get_device_exported('test/*')) num_devices = len(exported_devices) file = open('results.txt', 'a') file.write(',{},{}\\n'.format(elapsed, elapsed", "- start_time)) def register(num_devices): \"\"\"Register devices in the tango db.\"\"\"", "start_time file = open('results.txt', 'a') file.write('{},{},{}'.format(num_devices, elapsed, elapsed/num_devices)) print('- Register", "db = tango.Database() server_instance = 'TestDeviceServer/1' device_class = 'TestDevice' devices", "{:.4f} s ({:.4f} s/device)' .format(elapsed, elapsed / num_devices)) def list_devices():", "Registering {} devices'.format(ARGS.num_devices)) register(ARGS.num_devices) list_devices() print('* Starting server ...') sys.argv", "elapsed / num_devices)) print('>> Time taken to start devices: {:.4f}", "PARSER.add_argument('num_devices', metavar='N', type=int, default=1, nargs='?', help='Number of devices to start.')", "Delete server: {:.4f} s'.format(time.time() - start_time)) def register(num_devices): \"\"\"Register devices", "use with scaling tests.\"\"\" import sys import time import argparse", "run from TestDevice import TestDevice def init_callback(): \"\"\"Report server start", "from TestDevice import TestDevice def init_callback(): \"\"\"Report server start up", "= 'test/test_device/{:05d}'.format(device_id) db.add_device(device_info) elapsed = time.time() - start_time file =", "from tango.server import run from TestDevice import TestDevice def init_callback():", "elapsed / num_devices)) def delete_server(): \"\"\"Delete the TestDeviceServer from 
the", "registration time.') PARSER.add_argument('num_devices', metavar='N', type=int, default=1, nargs='?', help='Number of devices", "scaling tests.\"\"\" import sys import time import argparse import tango", "elapsed = time.time() - START_TIME list_devices() exported_devices = list(db.get_device_exported('test/*')) num_devices", "= tango.Database() device_info = tango.DbDevInfo() device_info.server = 'TestDeviceServer/1' # pylint:", "time import argparse import tango from tango.server import run from", "ARGS = PARSER.parse_args() delete_server() time.sleep(0.5) list_devices() print('* Registering {} devices'.format(ARGS.num_devices))", "s ({:.4f} s/device)' .format(elapsed, elapsed / num_devices)) def list_devices(): \"\"\"List", "= tango.Database() server_instance = 'TestDeviceServer/1' device_class = 'TestDevice' devices =", "= 'TestDeviceServer/1' device_class = 'TestDevice' devices = list(db.get_device_name(server_instance, device_class)) print('-", "server_list = list(db.get_server_list(server)) if server in server_list: start_time = time.time()", "start_time)) def register(num_devices): \"\"\"Register devices in the tango db.\"\"\" db", "device_info = tango.DbDevInfo() device_info.server = 'TestDeviceServer/1' # pylint: disable=protected-access device_info._class", "= 'TestDeviceServer/1' # pylint: disable=protected-access device_info._class = 'TestDevice' start_time =", "print('* Starting server ...') sys.argv = ['TestDeviceServer', '1', '-v4'] START_TIME", "-*- \"\"\"Test Tango device server for use with scaling tests.\"\"\"", "s ({:.4f} s/dev)' .format(elapsed, elapsed / num_devices)) def delete_server(): \"\"\"Delete", "server in server_list: start_time = time.time() db.delete_server('TestDeviceServer/1') print('- Delete server:", "= 'TestDevice' start_time = time.time() for device_id in range(num_devices): device_info.name", "in server_list: start_time = time.time() db.delete_server('TestDeviceServer/1') print('- Delete server: {:.4f}", "{}'.format(len(devices))) exported_devices = list(db.get_device_exported('test/*')) print('- No. running devices: {}'.format(len(exported_devices))) def", "= list(db.get_device_name(server_instance, device_class)) print('- No. 
registered devices: {}'.format(len(devices))) exported_devices =", "with the TestDeviceServer.\"\"\" db = tango.Database() server_instance = 'TestDeviceServer/1' device_class", "tango.Database() db.set_timeout_millis(50000) server = 'TestDeviceServer/1' server_list = list(db.get_server_list(server)) if server", "register(num_devices): \"\"\"Register devices in the tango db.\"\"\" db = tango.Database()", "in the tango db.\"\"\" db = tango.Database() device_info = tango.DbDevInfo()", "(start) the device server.\"\"\" run([TestDevice], verbose=True, msg_stream=sys.stdout, post_init_callback=init_callback, raises=False, args=args,", "pylint: disable=protected-access device_info._class = 'TestDevice' start_time = time.time() for device_id", "num_devices)) def delete_server(): \"\"\"Delete the TestDeviceServer from the tango db.\"\"\"", "devices'.format(ARGS.num_devices)) register(ARGS.num_devices) list_devices() print('* Starting server ...') sys.argv = ['TestDeviceServer',", ".format(elapsed, elapsed / num_devices)) def list_devices(): \"\"\"List tango devices associated", "/ num_devices)) def delete_server(): \"\"\"Delete the TestDeviceServer from the tango", "# pylint: disable=global-statement global START_TIME db = tango.Database() elapsed =", "\"\"\"Delete the TestDeviceServer from the tango db.\"\"\" db = tango.Database()", "s/dev)' .format(elapsed, elapsed / num_devices)) def delete_server(): \"\"\"Delete the TestDeviceServer", "elapsed = time.time() - start_time file = open('results.txt', 'a') file.write('{},{},{}'.format(num_devices,", "tango.server import run from TestDevice import TestDevice def init_callback(): \"\"\"Report", "({:.4f} s/dev)' .format(elapsed, elapsed / num_devices)) def delete_server(): \"\"\"Delete the", "'TestDevice' devices = list(db.get_device_name(server_instance, device_class)) print('- No. registered devices: {}'.format(len(devices)))", "post_init_callback=init_callback, raises=False, args=args, **kwargs) if __name__ == '__main__': PARSER =", "Starting server ...') sys.argv = ['TestDeviceServer', '1', '-v4'] START_TIME =", "num_devices)) def list_devices(): \"\"\"List tango devices associated with the TestDeviceServer.\"\"\"", "devices: {}'.format(len(devices))) exported_devices = list(db.get_device_exported('test/*')) print('- No. 
# -*- coding: utf-8 -*-
"""Test Tango device server for use with scaling tests."""
import sys
import time
import argparse

import tango
from tango.server import run

from TestDevice import TestDevice


def init_callback():
    """Report server start up times.

    This callback is executed post server initialisation.
    """
    # pylint: disable=global-statement
    global START_TIME
    db = tango.Database()
    elapsed = time.time() - START_TIME
    list_devices()
    exported_devices = list(db.get_device_exported('test/*'))
    num_devices = len(exported_devices)
    file = open('results.txt', 'a')
    file.write(',{},{}\n'.format(elapsed, elapsed / num_devices))
    print('>> Time taken to start devices: {:.4f} s ({:.4f} s/dev)'
          .format(elapsed, elapsed / num_devices))


def delete_server():
    """Delete the TestDeviceServer from the tango db."""
    db = tango.Database()
    db.set_timeout_millis(50000)
    server = 'TestDeviceServer/1'
    server_list = list(db.get_server_list(server))
    if server in server_list:
        start_time = time.time()
        db.delete_server('TestDeviceServer/1')
        print('- Delete server: {:.4f} s'.format(time.time() - start_time))


def register(num_devices):
    """Register devices in the tango db."""
    db = tango.Database()
    device_info = tango.DbDevInfo()
    device_info.server = 'TestDeviceServer/1'
    # pylint: disable=protected-access
    device_info._class = 'TestDevice'
    start_time = time.time()
    for device_id in range(num_devices):
        device_info.name = 'test/test_device/{:05d}'.format(device_id)
        db.add_device(device_info)
    elapsed = time.time() - start_time
    file = open('results.txt', 'a')
    file.write('{},{},{}'.format(num_devices, elapsed, elapsed/num_devices))
    print('- Register devices: {:.4f} s ({:.4f} s/device)'
          .format(elapsed, elapsed / num_devices))


def list_devices():
    """List tango devices associated with the TestDeviceServer."""
    db = tango.Database()
    server_instance = 'TestDeviceServer/1'
    device_class = 'TestDevice'
    devices = list(db.get_device_name(server_instance, device_class))
    print('- No. registered devices: {}'.format(len(devices)))
    exported_devices = list(db.get_device_exported('test/*'))
    print('- No. running devices: {}'.format(len(exported_devices)))


def main(args=None, **kwargs):
    """Run (start) the device server."""
    run([TestDevice], verbose=True, msg_stream=sys.stdout,
        post_init_callback=init_callback, raises=False,
        args=args, **kwargs)


if __name__ == '__main__':
    PARSER = argparse.ArgumentParser(description='Device registration time.')
    PARSER.add_argument('num_devices', metavar='N', type=int, default=1,
                        nargs='?', help='Number of devices to start.')
    ARGS = PARSER.parse_args()

    delete_server()
    time.sleep(0.5)
    list_devices()

    print('* Registering {} devices'.format(ARGS.num_devices))
    register(ARGS.num_devices)
    list_devices()

    print('* Starting server ...')
    sys.argv = ['TestDeviceServer', '1', '-v4']
    START_TIME = time.time()
    main()
from pathlib import Path

import pytest

from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \
    DocumentSearchPipeline, RootNode
from haystack.retriever.dense import DensePassageRetriever
from haystack.retriever.sparse import ElasticsearchRetriever


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_load_yaml(document_store_with_docs):
    # test correct load of indexing pipeline from yaml
    pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"),
                                       pipeline_name="indexing_pipeline")
    pipeline.run(file_path=Path("samples/pdf/sample_pdf_1.pdf"), top_k_retriever=10, top_k_reader=3)

    # test correct load of query pipeline from yaml
    pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"),
                                       pipeline_name="query_pipeline")
    prediction = pipeline.run(query="Who made the PDF specification?", top_k_retriever=10, top_k_reader=3)
    assert prediction["query"] == "Who made the PDF specification?"
    assert prediction["answers"][0]["answer"] == "Adobe Systems"

    # test invalid pipeline name
    with pytest.raises(Exception):
        Pipeline.load_from_yaml(path=Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="invalid")


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
    "retriever_with_docs, document_store_with_docs", [("elasticsearch", "elasticsearch")], indirect=True
)
def test_graph_creation(reader, retriever_with_docs, document_store_with_docs):
    pipeline = Pipeline()
    pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])

    with pytest.raises(AssertionError):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])

    with pytest.raises(AssertionError):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])

    with pytest.raises(Exception):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])

    with pytest.raises(Exception):
        pipeline = Pipeline()
        pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs):
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=3)
    assert prediction is not None
    assert prediction["query"] == "Who lives in Berlin?"
    assert prediction["answers"][0]["answer"] == "Carla"
    assert prediction["answers"][0]["probability"] <= 1
    assert prediction["answers"][0]["probability"] >= 0
    assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
    assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
    assert len(prediction["answers"]) == 3


@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=5)
    assert prediction["answers"][0]["offset_start"] == 11
    assert prediction["answers"][0]["offset_end"] == 16
    start = prediction["answers"][0]["offset_start"]
    end = prediction["answers"][0]["offset_end"]
    assert prediction["answers"][0]["context"][start:end] == prediction["answers"][0]["answer"]


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    query = "testing finder"
    prediction = pipeline.run(query=query, top_k_retriever=1, top_k_reader=1)
    assert prediction is not None
    assert len(prediction["answers"]) == 1
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
    "retriever,document_store",
    [("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
    indirect=True,
)
def test_faq_pipeline(retriever, document_store):
    documents = [
        {"text": "How to test module-1?", 'meta': {"source": "wiki1", "answer": "Using tests for module-1"}},
        {"text": "How to test module-2?", 'meta': {"source": "wiki2", "answer": "Using tests for module-2"}},
        {"text": "How to test module-3?", 'meta': {"source": "wiki3", "answer": "Using tests for module-3"}},
        {"text": "How to test module-4?", 'meta': {"source": "wiki4", "answer": "Using tests for module-4"}},
        {"text": "How to test module-5?", 'meta': {"source": "wiki5", "answer": "Using tests for module-5"}},
    ]

    document_store.write_documents(documents)
    document_store.update_embeddings(retriever)

    pipeline = FAQPipeline(retriever=retriever)
    output = pipeline.run(query="How to test this?", top_k_retriever=3)
    assert len(output["answers"]) == 3
    assert output["answers"][0]["query"].startswith("How to")
    assert output["answers"][0]["answer"].startswith("Using tests")

    if isinstance(document_store, ElasticsearchDocumentStore):
        output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
        assert len(output["answers"]) == 1


@pytest.mark.elasticsearch
@pytest.mark.parametrize(
    "retriever,document_store",
    [("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
    indirect=True,
)
def test_document_search_pipeline(retriever, document_store):
    documents = [
        {"text": "Sample text for document-1", 'meta': {"source": "wiki1"}},
        {"text": "Sample text for document-2", 'meta': {"source": "wiki2"}},
        {"text": "Sample text for document-3", 'meta': {"source": "wiki3"}},
        {"text": "Sample text for document-4", 'meta': {"source": "wiki4"}},
        {"text": "Sample text for document-5", 'meta': {"source": "wiki5"}},
    ]

    document_store.write_documents(documents)
    document_store.update_embeddings(retriever)

    pipeline = DocumentSearchPipeline(retriever=retriever)
    output = pipeline.run(query="How to test this?", top_k_retriever=4)
    assert len(output.get('documents', [])) == 4

    if isinstance(document_store, ElasticsearchDocumentStore):
        output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
        assert len(output["documents"]) == 1


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):
    base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    pipeline = TranslationWrapperPipeline(
        input_translator=de_to_en_translator,
        output_translator=en_to_de_translator,
        pipeline=base_pipeline
    )
    prediction = pipeline.run(query="Wer lebt in Berlin?", top_k_retriever=10, top_k_reader=3)
    assert prediction is not None
    assert prediction["query"] == "Wer lebt in Berlin?"
    assert "Carla" in prediction["answers"][0]["answer"]
    assert prediction["answers"][0]["probability"] <= 1
    assert prediction["answers"][0]["probability"] >= 0
    assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
    assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
assert len(output[\"documents\"]) == 1 @pytest.mark.slow @pytest.mark.elasticsearch", "prediction[\"answers\"][0][\"offset_end\"] assert prediction[\"answers\"][0][\"context\"][start:end] == prediction[\"answers\"][0][\"answer\"] @pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True)", "def test_graph_creation(reader, retriever_with_docs, document_store_with_docs): pipeline = Pipeline() pipeline.add_node(name=\"ES\", component=retriever_with_docs, inputs=[\"Query\"])", "retriever_with_docs): pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who lives in", "without weights join_node = JoinDocuments(join_mode=\"merge\") p = Pipeline() p.add_node(component=es, name=\"R1\",", "kwargs, \"output_1\" class JoinNode(RootNode): def run(self, **kwargs): if kwargs.get(\"inputs\"): kwargs[\"output\"]", "pipeline from yaml pipeline = Pipeline.load_from_yaml(Path(\"samples/pipeline/test_pipeline.yaml\"), pipeline_name=\"indexing_pipeline\") pipeline.run(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"), top_k_retriever=10, top_k_reader=3)", "kwargs[\"output\"] = \"A\" return kwargs, \"output_1\" class AWithOutput2(RootNode): outgoing_edges =", "@pytest.mark.elasticsearch @pytest.mark.parametrize( \"retriever,document_store\", [(\"embedding\", \"memory\"), (\"embedding\", \"faiss\"), (\"embedding\", \"milvus\"), (\"embedding\",", "\"milvus\"), (\"embedding\", \"elasticsearch\")], indirect=True, ) def test_document_search_pipeline(retriever, document_store): documents =", "inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=JoinNode(), inputs=[\"C\", \"D\"]) output = pipeline.run(query=\"test\") assert output[\"output\"]", "= pipeline.run(query=\"Who lives in Berlin?\", top_k_retriever=10, top_k_reader=3) assert prediction is", "documents = [ {\"text\": \"Sample text for document-1\", 'meta': {\"source\":", "= Pipeline() pipeline.add_node(name=\"A\", component=AWithOutput1(), inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(), inputs=[\"A.output_1\"]) pipeline.add_node(name=\"C\", component=C(),", "2 def run(self, **kwargs): kwargs[\"output\"] = \"A\" return kwargs, \"output_all\"", "for module-2\"}}, {\"text\": \"How to test module-3?\", 'meta': {\"source\": \"wiki3\",", "tests\") if isinstance(document_store, ElasticsearchDocumentStore): output = pipeline.run(query=\"How to test this?\",", "= [ {\"text\": \"Sample text for document-1\", 'meta': {\"source\": \"wiki1\"}},", "assert prediction[\"query\"] == \"Wer lebt in Berlin?\" assert \"Carla\" in", "[\"tfidf\"], indirect=True) def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator): base_pipeline = ExtractiveQAPipeline(reader=reader,", "from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \\ DocumentSearchPipeline,", "component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"F\", component=JoinNode(), inputs=[\"D\", \"E\"]) output = pipeline.run(query=\"test\") assert", "1 @pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True) def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator,", "for document-2\", 'meta': {\"source\": \"wiki2\"}}, {\"text\": \"Sample text for document-3\",", "test_extractive_qa_offsets(reader, retriever_with_docs): pipeline = 
ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who lives", "inputs=[\"Query\"]) p.add_node(component=join_node, name=\"Join\", inputs=[\"R1\", \"R2\"]) results = p.run(query=query) assert len(results[\"documents\"])", "@pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True) def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):", "pipeline = Pipeline() pipeline.add_node(name=\"ES\", component=retriever_with_docs, inputs=[\"InvalidNode\"]) @pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"],", "'meta': {\"source\": \"wiki3\"}}, {\"text\": \"Sample text for document-4\", 'meta': {\"source\":", "import ElasticsearchDocumentStore from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline,", "class A(RootNode): def run(self, **kwargs): kwargs[\"output\"] = \"A\" return kwargs,", "\"memory\"), (\"embedding\", \"faiss\"), (\"embedding\", \"milvus\"), (\"embedding\", \"elasticsearch\")], indirect=True, ) def", "= pipeline.run(query=\"How to test this?\", top_k_retriever=3) assert len(output[\"answers\"]) == 3", "pipeline.run(query=\"test\") assert output[\"output\"] == \"ABEABD\" pipeline = Pipeline() pipeline.add_node(name=\"A\", component=AWithOutput2(),", "PDF specification?\" assert prediction[\"answers\"][0][\"answer\"] == \"Adobe Systems\" # test invalid", "for module-1\"}}, {\"text\": \"How to test module-2?\", 'meta': {\"source\": \"wiki2\",", "\"How to test module-4?\", 'meta': {\"source\": \"wiki4\", \"answer\": \"Using tests", "== 3 assert output[\"answers\"][0][\"query\"].startswith(\"How to\") assert output[\"answers\"][0][\"answer\"].startswith(\"Using tests\") if isinstance(document_store,", "inputs=[\"A.output_1\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"A.output_2\"]) pipeline.add_node(name=\"D\", component=E(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=D(), inputs=[\"B\"])", "= JoinDocuments() p = Pipeline() p.add_node(component=es, name=\"R1\", inputs=[\"Query\"]) p.add_node(component=dpr, name=\"R2\",", "\"answer\": \"Using tests for module-4\"}}, {\"text\": \"How to test module-5?\",", "= \"Where does Carla lives?\" # test merge without weights", "len(results[\"documents\"]) == 2 # test concatenate join_node = JoinDocuments(join_mode=\"concatenate\") p", "pipeline.add_node(name=\"Reader\", component=retriever_with_docs, inputs=[\"ES.output_2\"]) with pytest.raises(AssertionError): pipeline.add_node(name=\"Reader\", component=retriever_with_docs, inputs=[\"ES.wrong_edge_label\"]) with pytest.raises(Exception):", "document-2\", 'meta': {\"source\": \"wiki2\"}}, {\"text\": \"Sample text for document-3\", 'meta':", "FAQPipeline, \\ DocumentSearchPipeline, RootNode from haystack.retriever.dense import DensePassageRetriever from haystack.retriever.sparse", "from haystack.retriever.sparse import ElasticsearchRetriever @pytest.mark.parametrize(\"document_store_with_docs\", [\"elasticsearch\"], indirect=True) def test_load_yaml(document_store_with_docs): #", "\"answer\": \"Using tests for module-2\"}}, {\"text\": \"How to test module-3?\",", "text for document-2\", 'meta': {\"source\": \"wiki2\"}}, {\"text\": \"Sample text for", "results = p.run(query=query) assert results[\"answers\"][0][\"answer\"] == \"Berlin\" def test_parallel_paths_in_pipeline_graph(): 
from pathlib import Path

import pytest

from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.pipeline import TranslationWrapperPipeline, JoinDocuments, ExtractiveQAPipeline, Pipeline, FAQPipeline, \
    DocumentSearchPipeline, RootNode
from haystack.retriever.dense import DensePassageRetriever
from haystack.retriever.sparse import ElasticsearchRetriever


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_load_yaml(document_store_with_docs):
    # test correct load of indexing pipeline from yaml
    pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="indexing_pipeline")
    pipeline.run(file_path=Path("samples/pdf/sample_pdf_1.pdf"), top_k_retriever=10, top_k_reader=3)

    # test correct load of query pipeline from yaml
    pipeline = Pipeline.load_from_yaml(Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="query_pipeline")
    prediction = pipeline.run(query="Who made the PDF specification?", top_k_retriever=10, top_k_reader=3)
    assert prediction["query"] == "Who made the PDF specification?"
    assert prediction["answers"][0]["answer"] == "Adobe Systems"

    # test invalid pipeline name
    with pytest.raises(Exception):
        Pipeline.load_from_yaml(path=Path("samples/pipeline/test_pipeline.yaml"), pipeline_name="invalid")
@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize(
    "retriever_with_docs, document_store_with_docs", [("elasticsearch", "elasticsearch")], indirect=True
)
def test_graph_creation(reader, retriever_with_docs, document_store_with_docs):
    pipeline = Pipeline()
    pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["Query"])

    with pytest.raises(AssertionError):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.output_2"])

    with pytest.raises(AssertionError):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["ES.wrong_edge_label"])

    with pytest.raises(Exception):
        pipeline.add_node(name="Reader", component=retriever_with_docs, inputs=["InvalidNode"])

    with pytest.raises(Exception):
        pipeline = Pipeline()
        pipeline.add_node(name="ES", component=retriever_with_docs, inputs=["InvalidNode"])


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers(reader, retriever_with_docs):
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=3)
    assert prediction is not None
    assert prediction["query"] == "Who lives in Berlin?"
    assert prediction["answers"][0]["answer"] == "Carla"
    assert prediction["answers"][0]["probability"] <= 1
    assert prediction["answers"][0]["probability"] >= 0
    assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
    assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
    assert len(prediction["answers"]) == 3


@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_offsets(reader, retriever_with_docs):
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    prediction = pipeline.run(query="Who lives in Berlin?", top_k_retriever=10, top_k_reader=5)
    assert prediction["answers"][0]["offset_start"] == 11
    assert prediction["answers"][0]["offset_end"] == 16
    start = prediction["answers"][0]["offset_start"]
    end = prediction["answers"][0]["offset_end"]
    assert prediction["answers"][0]["context"][start:end] == prediction["answers"][0]["answer"]


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_single_result(reader, retriever_with_docs):
    pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    query = "testing finder"
    prediction = pipeline.run(query=query, top_k_retriever=1, top_k_reader=1)
    assert prediction is not None
    assert len(prediction["answers"]) == 1


@pytest.mark.elasticsearch
@pytest.mark.parametrize(
    "retriever,document_store",
    [("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
    indirect=True,
)
def test_faq_pipeline(retriever, document_store):
    documents = [
        {"text": "How to test module-1?", 'meta': {"source": "wiki1", "answer": "Using tests for module-1"}},
        {"text": "How to test module-2?", 'meta': {"source": "wiki2", "answer": "Using tests for module-2"}},
        {"text": "How to test module-3?", 'meta': {"source": "wiki3", "answer": "Using tests for module-3"}},
        {"text": "How to test module-4?", 'meta': {"source": "wiki4", "answer": "Using tests for module-4"}},
        {"text": "How to test module-5?", 'meta': {"source": "wiki5", "answer": "Using tests for module-5"}},
    ]

    document_store.write_documents(documents)
    document_store.update_embeddings(retriever)

    pipeline = FAQPipeline(retriever=retriever)

    output = pipeline.run(query="How to test this?", top_k_retriever=3)
    assert len(output["answers"]) == 3
    assert output["answers"][0]["query"].startswith("How to")
    assert output["answers"][0]["answer"].startswith("Using tests")

    if isinstance(document_store, ElasticsearchDocumentStore):
        output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
        assert len(output["answers"]) == 1


@pytest.mark.elasticsearch
@pytest.mark.parametrize(
    "retriever,document_store",
    [("embedding", "memory"), ("embedding", "faiss"), ("embedding", "milvus"), ("embedding", "elasticsearch")],
    indirect=True,
)
def test_document_search_pipeline(retriever, document_store):
    documents = [
        {"text": "Sample text for document-1", 'meta': {"source": "wiki1"}},
        {"text": "Sample text for document-2", 'meta': {"source": "wiki2"}},
        {"text": "Sample text for document-3", 'meta': {"source": "wiki3"}},
        {"text": "Sample text for document-4", 'meta': {"source": "wiki4"}},
        {"text": "Sample text for document-5", 'meta': {"source": "wiki5"}},
    ]

    document_store.write_documents(documents)
    document_store.update_embeddings(retriever)

    pipeline = DocumentSearchPipeline(retriever=retriever)
    output = pipeline.run(query="How to test this?", top_k_retriever=4)
    assert len(output.get('documents', [])) == 4

    if isinstance(document_store, ElasticsearchDocumentStore):
        output = pipeline.run(query="How to test this?", filters={"source": ["wiki2"]}, top_k_retriever=5)
        assert len(output["documents"]) == 1


@pytest.mark.slow
@pytest.mark.elasticsearch
@pytest.mark.parametrize("retriever_with_docs", ["tfidf"], indirect=True)
def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator):
    base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs)
    pipeline = TranslationWrapperPipeline(
        input_translator=de_to_en_translator,
        output_translator=en_to_de_translator,
        pipeline=base_pipeline
    )
    prediction = pipeline.run(query="Wer lebt in Berlin?", top_k_retriever=10, top_k_reader=3)
    assert prediction is not None
    assert prediction["query"] == "Wer lebt in Berlin?"
    assert "Carla" in prediction["answers"][0]["answer"]
    assert prediction["answers"][0]["probability"] <= 1
    assert prediction["answers"][0]["probability"] >= 0
    assert prediction["answers"][0]["meta"]["meta_field"] == "test1"
    assert prediction["answers"][0]["context"] == "My name is Carla and I live in Berlin"
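# JoinDocuments fans multiple retrievers into a single document list. The test
# below covers plain "merge", "merge" with per-retriever weights plus a
# top_k_join cut-off, "concatenate", and the default mode feeding a Reader.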
run(self, **kwargs): kwargs[\"output\"] = \"A\" return", "\"output_1\" class JoinNode(RootNode): def run(self, **kwargs): if kwargs.get(\"inputs\"): kwargs[\"output\"] =", "top_k_retriever=1, top_k_reader=1) assert prediction is not None assert len(prediction[\"answers\"]) ==", "component=retriever_with_docs, inputs=[\"ES.wrong_edge_label\"]) with pytest.raises(Exception): pipeline.add_node(name=\"Reader\", component=retriever_with_docs, inputs=[\"InvalidNode\"]) with pytest.raises(Exception): pipeline", "3 # test merge with weights join_node = JoinDocuments(join_mode=\"merge\", weights=[1000,", "prediction = pipeline.run(query=\"Wer lebt in Berlin?\", top_k_retriever=10, top_k_reader=3) assert prediction", "= Pipeline() p.add_node(component=es, name=\"R1\", inputs=[\"Query\"]) p.add_node(component=dpr, name=\"R2\", inputs=[\"Query\"]) p.add_node(component=join_node, name=\"Join\",", "I live in Berlin\" assert len(prediction[\"answers\"]) == 3 @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\",", "lives in Berlin?\", top_k_retriever=10, top_k_reader=3) assert prediction is not None", "ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who lives in Berlin?\", top_k_retriever=10, top_k_reader=5)", "kwargs, \"output_1\" class JoinNode(RootNode): def run(self, **kwargs): kwargs[\"output\"] = kwargs[\"inputs\"][0][\"output\"]", "p.add_node(component=join_node, name=\"Join\", inputs=[\"R1\", \"R2\"]) results = p.run(query=query) assert results[\"documents\"][0].score >", "assert len(output[\"answers\"]) == 3 assert output[\"answers\"][0][\"query\"].startswith(\"How to\") assert output[\"answers\"][0][\"answer\"].startswith(\"Using tests\")", "= ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) query = \"testing finder\" prediction = pipeline.run(query=query,", "haystack.retriever.sparse import ElasticsearchRetriever @pytest.mark.parametrize(\"document_store_with_docs\", [\"elasticsearch\"], indirect=True) def test_load_yaml(document_store_with_docs): # test", "1 @pytest.mark.elasticsearch @pytest.mark.parametrize( \"retriever,document_store\", [(\"embedding\", \"memory\"), (\"embedding\", \"faiss\"), (\"embedding\", \"milvus\"),", "\"E\", \"C\"]) output = pipeline.run(query=\"test\") assert output[\"output\"] == \"AC\" pipeline", "len(output[\"answers\"]) == 3 assert output[\"answers\"][0][\"query\"].startswith(\"How to\") assert output[\"answers\"][0][\"answer\"].startswith(\"Using tests\") if", "ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who lives in Berlin?\", top_k_retriever=10, top_k_reader=3)", "indirect=True) @pytest.mark.parametrize(\"reader\", [\"farm\"], indirect=True) def test_join_document_pipeline(document_store_with_docs, reader): es = ElasticsearchRetriever(document_store=document_store_with_docs)", "[\"tfidf\"], indirect=True) def test_extractive_qa_offsets(reader, retriever_with_docs): pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction", "and I live in Berlin\" assert len(prediction[\"answers\"]) == 3 @pytest.mark.elasticsearch", "\"answer\": \"Using tests for module-3\"}}, {\"text\": \"How to test module-4?\",", "return kwargs, \"output_1\" class C(RootNode): def run(self, **kwargs): kwargs[\"output\"] +=", "\"output_1\" pipeline = Pipeline() pipeline.add_node(name=\"A\", component=A(), inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(), inputs=[\"A\"])", "Berlin?\" assert 
\"Carla\" in prediction[\"answers\"][0][\"answer\"] assert prediction[\"answers\"][0][\"probability\"] <= 1 assert", "inputs=[\"A\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=E(), inputs=[\"C\"]) pipeline.add_node(name=\"D\", component=D(), inputs=[\"B\"])", "return kwargs, \"output_1\" class JoinNode(RootNode): def run(self, **kwargs): if kwargs.get(\"inputs\"):", "(\"embedding\", \"faiss\"), (\"embedding\", \"milvus\"), (\"embedding\", \"elasticsearch\")], indirect=True, ) def test_faq_pipeline(retriever,", "def test_extractive_qa_offsets(reader, retriever_with_docs): pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who", "pipeline.add_node(name=\"D\", component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=JoinNode(), inputs=[\"C\", \"D\"]) output = pipeline.run(query=\"test\")", "[ {\"text\": \"How to test module-1?\", 'meta': {\"source\": \"wiki1\", \"answer\":", "import DensePassageRetriever from haystack.retriever.sparse import ElasticsearchRetriever @pytest.mark.parametrize(\"document_store_with_docs\", [\"elasticsearch\"], indirect=True) def", "import Path import pytest from haystack.document_store.elasticsearch import ElasticsearchDocumentStore from haystack.pipeline", "assert output[\"output\"] == \"AC\" pipeline = Pipeline() pipeline.add_node(name=\"A\", component=AWithOutputAll(), inputs=[\"Query\"])", "p = Pipeline() p.add_node(component=es, name=\"R1\", inputs=[\"Query\"]) p.add_node(component=dpr, name=\"R2\", inputs=[\"Query\"]) p.add_node(component=join_node,", "inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(), inputs=[\"A\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=E(), inputs=[\"C\"])", "pipeline.run(query=\"How to test this?\", filters={\"source\": [\"wiki2\"]}, top_k_retriever=5) assert len(output[\"answers\"]) ==", "<= 1 assert prediction[\"answers\"][0][\"probability\"] >= 0 assert prediction[\"answers\"][0][\"meta\"][\"meta_field\"] == \"test1\"", "= ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) pipeline = TranslationWrapperPipeline( input_translator=de_to_en_translator, output_translator=en_to_de_translator, pipeline=base_pipeline )", "assert len(results[\"documents\"]) == 3 # test join_node with reader join_node", "[(\"embedding\", \"memory\"), (\"embedding\", \"faiss\"), (\"embedding\", \"milvus\"), (\"embedding\", \"elasticsearch\")], indirect=True, )", "prediction[\"query\"] == \"Who lives in Berlin?\" assert prediction[\"answers\"][0][\"answer\"] == \"Carla\"", "inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=E(), inputs=[\"C\"]) pipeline.add_node(name=\"D\", component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"F\", component=JoinNode(), inputs=[\"D\",", "pipeline.run(query=\"test\") assert output[\"output\"] == \"ABCABD\" def test_parallel_paths_in_pipeline_graph_with_branching(): class AWithOutput1(RootNode): outgoing_edges", "= ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who lives in Berlin?\", top_k_retriever=10,", "in Berlin?\" assert prediction[\"answers\"][0][\"answer\"] == \"Carla\" assert prediction[\"answers\"][0][\"probability\"] <= 1", "inputs=[\"C\"]) pipeline.add_node(name=\"D\", component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"F\", component=JoinNode(), inputs=[\"D\", \"E\"]) output =", "Pipeline() pipeline.add_node(name=\"ES\", 
component=retriever_with_docs, inputs=[\"InvalidNode\"]) @pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True) def", "AWithOutput1(RootNode): outgoing_edges = 2 def run(self, **kwargs): kwargs[\"output\"] = \"A\"", "component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"F\", component=JoinNode(), inputs=[\"D\", \"E\", \"C\"]) output = pipeline.run(query=\"test\")", "indirect=True) def test_extractive_qa_answers_single_result(reader, retriever_with_docs): pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) query =", "# test correct load of indexing pipeline from yaml pipeline", "== prediction[\"answers\"][0][\"answer\"] @pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True) def test_extractive_qa_answers_single_result(reader, retriever_with_docs):", "def test_parallel_paths_in_pipeline_graph(): class A(RootNode): def run(self, **kwargs): kwargs[\"output\"] = \"A\"", "of indexing pipeline from yaml pipeline = Pipeline.load_from_yaml(Path(\"samples/pipeline/test_pipeline.yaml\"), pipeline_name=\"indexing_pipeline\") pipeline.run(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"),", "test module-2?\", 'meta': {\"source\": \"wiki2\", \"answer\": \"Using tests for module-2\"}},", "top_k_retriever=10, top_k_reader=3) # test correct load of query pipeline from", "\"wiki5\", \"answer\": \"Using tests for module-5\"}}, ] document_store.write_documents(documents) document_store.update_embeddings(retriever) pipeline", "kwargs[\"output\"] = \"\" for input_dict in kwargs[\"inputs\"]: kwargs[\"output\"] += (input_dict[\"output\"])", "\"ABEABD\" pipeline = Pipeline() pipeline.add_node(name=\"A\", component=AWithOutput2(), inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(), inputs=[\"A.output_1\"])", "pipeline.run(query=\"How to test this?\", filters={\"source\": [\"wiki2\"]}, top_k_retriever=5) assert len(output[\"documents\"]) ==", "kwargs, \"output_all\" class B(RootNode): def run(self, **kwargs): kwargs[\"output\"] += \"B\"", "pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) prediction = pipeline.run(query=\"Who lives in Berlin?\",", "class D(RootNode): def run(self, **kwargs): kwargs[\"output\"] += \"D\" return kwargs,", "pipeline.add_node(name=\"D\", component=E(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"F\", component=JoinNode(), inputs=[\"D\", \"E\",", "output = pipeline.run(query=\"test\") assert output[\"output\"] == \"AC\" pipeline = Pipeline()", "{\"source\": \"wiki4\"}}, {\"text\": \"Sample text for document-5\", 'meta': {\"source\": \"wiki5\"}},", "== \"Who made the PDF specification?\" assert prediction[\"answers\"][0][\"answer\"] == \"Adobe", "None assert prediction[\"query\"] == \"Who lives in Berlin?\" assert prediction[\"answers\"][0][\"answer\"]", "pipeline.run(query=\"How to test this?\", top_k_retriever=3) assert len(output[\"answers\"]) == 3 assert", "inputs=[\"A\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"B\"]) pipeline.add_node(name=\"D\", component=D(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=JoinNode(), inputs=[\"C\",", "in kwargs[\"inputs\"]: kwargs[\"output\"] += (input_dict[\"output\"]) return kwargs, \"output_1\" pipeline =", "prediction[\"query\"] == \"Wer lebt in Berlin?\" assert \"Carla\" in prediction[\"answers\"][0][\"answer\"]", "for document-3\", 'meta': {\"source\": 
\"wiki3\"}}, {\"text\": \"Sample text for document-4\",", "inputs=[\"Query\"]) p.add_node(component=dpr, name=\"R2\", inputs=[\"Query\"]) p.add_node(component=join_node, name=\"Join\", inputs=[\"R1\", \"R2\"]) p.add_node(component=reader, name=\"Reader\",", "p.run(query=query) assert results[\"documents\"][0].score > 1000 assert len(results[\"documents\"]) == 2 #", "run(self, **kwargs): kwargs[\"output\"] += \"C\" return kwargs, \"output_1\" class D(RootNode):", "run(self, **kwargs): kwargs[\"output\"] = \"A\" return kwargs, \"output_1\" class B(RootNode):", "**kwargs): kwargs[\"output\"] += \"E\" return kwargs, \"output_1\" class JoinNode(RootNode): def", "= Pipeline.load_from_yaml(Path(\"samples/pipeline/test_pipeline.yaml\"), pipeline_name=\"query_pipeline\") prediction = pipeline.run(query=\"Who made the PDF specification?\",", "component=retriever_with_docs, inputs=[\"Query\"]) with pytest.raises(AssertionError): pipeline.add_node(name=\"Reader\", component=retriever_with_docs, inputs=[\"ES.output_2\"]) with pytest.raises(AssertionError): pipeline.add_node(name=\"Reader\",", "pipeline.add_node(name=\"A\", component=A(), inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(), inputs=[\"A\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"B\"]) pipeline.add_node(name=\"E\",", "inputs=[\"D\", \"E\", \"C\"]) output = pipeline.run(query=\"test\") assert output[\"output\"] == \"ACABEABD\"", "kwargs.get(\"inputs\"): kwargs[\"output\"] = \"\" for input_dict in kwargs[\"inputs\"]: kwargs[\"output\"] +=", "**kwargs): kwargs[\"output\"] += \"B\" return kwargs, \"output_1\" class C(RootNode): def", "Pipeline.load_from_yaml(Path(\"samples/pipeline/test_pipeline.yaml\"), pipeline_name=\"indexing_pipeline\") pipeline.run(file_path=Path(\"samples/pdf/sample_pdf_1.pdf\"), top_k_retriever=10, top_k_reader=3) # test correct load of", "name=\"R2\", inputs=[\"Query\"]) p.add_node(component=join_node, name=\"Join\", inputs=[\"R1\", \"R2\"]) p.add_node(component=reader, name=\"Reader\", inputs=[\"Join\"]) results", "if kwargs.get(\"inputs\"): kwargs[\"output\"] = \"\" for input_dict in kwargs[\"inputs\"]: kwargs[\"output\"]", "\"Where does Carla lives?\" # test merge without weights join_node", "kwargs[\"output\"] = kwargs[\"inputs\"][0][\"output\"] + kwargs[\"inputs\"][1][\"output\"] return kwargs, \"output_1\" pipeline =", "Pipeline, FAQPipeline, \\ DocumentSearchPipeline, RootNode from haystack.retriever.dense import DensePassageRetriever from", "prediction is not None assert len(prediction[\"answers\"]) == 1 @pytest.mark.elasticsearch @pytest.mark.parametrize(", "the PDF specification?\" assert prediction[\"answers\"][0][\"answer\"] == \"Adobe Systems\" # test", "{\"source\": \"wiki3\"}}, {\"text\": \"Sample text for document-4\", 'meta': {\"source\": \"wiki4\"}},", "== \"ABEABD\" pipeline = Pipeline() pipeline.add_node(name=\"A\", component=AWithOutput2(), inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(),", "{\"text\": \"Sample text for document-2\", 'meta': {\"source\": \"wiki2\"}}, {\"text\": \"Sample", "document-5\", 'meta': {\"source\": \"wiki5\"}}, ] document_store.write_documents(documents) document_store.update_embeddings(retriever) pipeline = DocumentSearchPipeline(retriever=retriever)", "def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator): base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) pipeline", 
"query_embedding_model=\"facebook/dpr-question_encoder-single-nq-base\", passage_embedding_model=\"facebook/dpr-ctx_encoder-single-nq-base\", use_gpu=False, ) document_store_with_docs.update_embeddings(dpr) query = \"Where does Carla", "base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) pipeline = TranslationWrapperPipeline( input_translator=de_to_en_translator, output_translator=en_to_de_translator, pipeline=base_pipeline", "== 11 assert prediction[\"answers\"][0][\"offset_end\"] == 16 start = prediction[\"answers\"][0][\"offset_start\"] end", "pipeline.run(query=\"How to test this?\", top_k_retriever=4) assert len(output.get('documents', [])) == 4", "test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator): base_pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) pipeline =", "pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) query = \"testing finder\" prediction =", "module-3?\", 'meta': {\"source\": \"wiki3\", \"answer\": \"Using tests for module-3\"}}, {\"text\":", "test module-5?\", 'meta': {\"source\": \"wiki5\", \"answer\": \"Using tests for module-5\"}},", "top_k_reader=5) assert prediction[\"answers\"][0][\"offset_start\"] == 11 assert prediction[\"answers\"][0][\"offset_end\"] == 16 start", "'meta': {\"source\": \"wiki4\", \"answer\": \"Using tests for module-4\"}}, {\"text\": \"How", "output = pipeline.run(query=\"test\") assert output[\"output\"] == \"ABEABD\" pipeline = Pipeline()", "retriever_with_docs): pipeline = ExtractiveQAPipeline(reader=reader, retriever=retriever_with_docs) query = \"testing finder\" prediction", "JoinDocuments(join_mode=\"merge\", weights=[1000, 1], top_k_join=2) p = Pipeline() p.add_node(component=es, name=\"R1\", inputs=[\"Query\"])", "@pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True) def test_extractive_qa_answers_with_translator(reader, retriever_with_docs, en_to_de_translator, de_to_en_translator): base_pipeline =", "component=AWithOutput1(), inputs=[\"Query\"]) pipeline.add_node(name=\"B\", component=B(), inputs=[\"A.output_1\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"A.output_2\"]) pipeline.add_node(name=\"D\", component=E(),", "Berlin?\", top_k_retriever=10, top_k_reader=3) assert prediction is not None assert prediction[\"query\"]", "{\"text\": \"Sample text for document-3\", 'meta': {\"source\": \"wiki3\"}}, {\"text\": \"Sample", "weights=[1000, 1], top_k_join=2) p = Pipeline() p.add_node(component=es, name=\"R1\", inputs=[\"Query\"]) p.add_node(component=dpr,", "Berlin\" assert len(prediction[\"answers\"]) == 3 @pytest.mark.elasticsearch @pytest.mark.parametrize(\"retriever_with_docs\", [\"tfidf\"], indirect=True) def", "\"wiki3\", \"answer\": \"Using tests for module-3\"}}, {\"text\": \"How to test", "pipeline.add_node(name=\"B\", component=B(), inputs=[\"A\"]) pipeline.add_node(name=\"C\", component=C(), inputs=[\"B\"]) pipeline.add_node(name=\"E\", component=E(), inputs=[\"C\"]) pipeline.add_node(name=\"D\",", "invalid pipeline name with pytest.raises(Exception): Pipeline.load_from_yaml(path=Path(\"samples/pipeline/test_pipeline.yaml\"), pipeline_name=\"invalid\") @pytest.mark.slow @pytest.mark.elasticsearch @pytest.mark.parametrize(", "test module-4?\", 'meta': {\"source\": \"wiki4\", \"answer\": \"Using tests for module-4\"}},", "test correct load of query pipeline from yaml pipeline =", "DensePassageRetriever from haystack.retriever.sparse import 
@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
def test_join_document_pipeline(document_store_with_docs, reader):
    es = ElasticsearchRetriever(document_store=document_store_with_docs)
    dpr = DensePassageRetriever(
        document_store=document_store_with_docs,
        query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
        passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
        use_gpu=False,
    )
    document_store_with_docs.update_embeddings(dpr)

    query = "Where does Carla lives?"

    # test merge without weights
    join_node = JoinDocuments()
    p = Pipeline()
    p.add_node(component=es, name="R1", inputs=["Query"])
    p.add_node(component=dpr, name="R2", inputs=["Query"])
    p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
    results = p.run(query=query)
    assert len(results["documents"]) == 3

    # test merge with weights
    join_node = JoinDocuments(join_mode="merge", weights=[1000, 1], top_k_join=2)
    p = Pipeline()
    p.add_node(component=es, name="R1", inputs=["Query"])
    p.add_node(component=dpr, name="R2", inputs=["Query"])
    p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
    results = p.run(query=query)
    assert results["documents"][0].score > 1000
    assert len(results["documents"]) == 2

    # test join with a reader on top
    join_node = JoinDocuments()
    p = Pipeline()
    p.add_node(component=es, name="R1", inputs=["Query"])
    p.add_node(component=dpr, name="R2", inputs=["Query"])
    p.add_node(component=join_node, name="Join", inputs=["R1", "R2"])
    p.add_node(component=reader, name="Reader", inputs=["Join"])
    results = p.run(query=query)
    assert results["answers"][0]["answer"] == "Berlin"

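# Note on the weighted merge above: with join_mode="merge", the weights list
# lines up with the join node's inputs (here [R1, R2]), so Elasticsearch
# scores are scaled by 1000 and DPR scores by 1; that scaling is what the
# "score > 1000" assertion checks, and top_k_join=2 truncates the merged
# list to two documents. (The weight-to-input mapping is inferred from the
# assertions, not stated explicitly in the tests.)
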
"kwargs[\"output\"] += \"B\" return kwargs, \"output_1\" class C(RootNode): def run(self,", "test_document_search_pipeline(retriever, document_store): documents = [ {\"text\": \"Sample text for document-1\",", "output[\"answers\"][0][\"query\"].startswith(\"How to\") assert output[\"answers\"][0][\"answer\"].startswith(\"Using tests\") if isinstance(document_store, ElasticsearchDocumentStore): output =", "== \"Adobe Systems\" # test invalid pipeline name with pytest.raises(Exception):", "specification?\", top_k_retriever=10, top_k_reader=3) assert prediction[\"query\"] == \"Who made the PDF", "document_store): documents = [ {\"text\": \"How to test module-1?\", 'meta':" ]
[ "polish_iterations: break if check_exist(asm_cns): return asm_cns else: print(\"polishing failed for", "in read_list: output.write(read + \"\\n\") else: # TODO: think about", "variant reads sv_reads = sv_reads_dir + \"/contig\" + str(k) sv_reads_rename", "None if check_exist(consensus): consensus_rename = os.path.join(asm_dir, contig_name + \".cns.fa\") os.rename(consensus,", "samfile = pysam.AlignmentFile(bam, \"rb\") read_ids = os.path.join(out, sample_name + \".id\")", "reads, asm_dir, contig_name, thread, polish_iterations, presets ) if check_exist(asm_cns): return", "in reads: output.write(read + \"\\n\") # write out_line = line.replace(\"\\n\",", "\"-fo\", consensus, ], timeout=300, ) except subprocess.TimeoutExpired: print(\"fail to assemble", "\"0\", ] ) except Exception as e: print(e) print(\"Assembly failed,", "+ contig_name + \".reads.fa\" os.rename(sv_reads, sv_reads_rename) thread_asm = 1 asm_pa", "+ threads + \" > \" + bam ) try:", "= run_flye_polishing( asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets )", "print(\"Local assembly failed, exiting...\") sys.exit(1) proc_time = time.time() - start_time", "+ read_ids + \" | sort | uniq\" with open(read_ids_unique,", "# remove tmp files os.remove(read_ids) os.remove(read_ids_unique) os.remove(subset_fa) os.remove(subset_fa_reorder) def extract_reads(reads,", "else: asm_cns = run_flye_polishing( asm_cns, reads, asm_dir, contig_name, thread, polish_iterations,", "\".subset.fa\") command = \"seqtk subseq \" + raw_reads + \"", "\"wtdbg2\": asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets) else: asm_cns", "cigar reads window = 1000 samfile = pysam.AlignmentFile(bam, \"rb\") read_ids", "\"len=\" + str(len(record.seq)) SeqIO.write(record, merged_output_handle, \"fasta\") with open(parsed_contig, \"w\") as", "None # rename contig file polished_contig = os.path.join( tmp_out_dir, \"polished_\"", "\" | wtpoa-cns -t \" + threads + \" -d", "= run_wtdbg2_polishing( asm_cns, reads, thread, polish_iterations, presets ) else: asm_cns", "extract_reads(reads, list, out): \"\"\"Extract reads from fasta using read ID", "csplit_prefix = reads_dir + \"/contig\" m = [] k =", "Pool import pysam from telr.TELR_utility import mkdir, check_exist, format_time def", "time import logging from Bio import SeqIO from multiprocessing import", "1 with open(vcf_parsed, \"r\") as input: for line in input:", "line.replace(\"\\n\", \"\").split(\"\\t\") contig_name = \"_\".join([entry[0], entry[1], entry[2]]) # rename variant", "elif len(m) == 0: print(\"No insertion detected, exiting...\") else: m", "[ sv_reads_rename, contig_dir, contig_name, thread_asm, presets, assembler, polisher, polish_iterations, ]", "\"fasta\") logging.info(\"Local assembly finished in \" + format_time(proc_time)) return merged_contigs,", "command = \"cat \" + read_ids + \" | sort", "1: subprocess.call([\"cp\", subset_fa_reorder, reads_dir + \"/contig0\"]) elif len(m) == 0:", "= os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir) try: subprocess.call( [ \"flye\", \"--polish-target\", asm_cns,", "asm_cns) os.remove(bam) else: break k = k + 1 if", "* (len(entry[8].split(\",\"))) else: k = k + 2 * int(entry[14])", "polisher, polish_iterations, ] asm_pa_list.append(asm_pa) k = k + 1 #", "check_exist(cns_tmp): os.rename(cns_tmp, asm_cns) os.remove(bam) else: break k = k +", "\"cat \" + read_ids + \" | sort | uniq\"", "\"/contig0\"]) elif len(m) == 0: print(\"No insertion detected, exiting...\") else:", "\"r\") as input: for line in input: entry = 
line.replace(\"\\n\",", "contigs contig_path = os.path.join(tmp_out_dir, \"assembly.fasta\") contig_path_new = os.path.join(asm_dir, contig_name +", "print(\"Prepare local assembly input data failed, exiting...\") sys.exit(1) mkdir(contig_dir) k", "vcf_parsed_new # generate unique ID list read_ids_unique = read_ids +", "assembly_passed_loci = set() merged_contigs = os.path.join(out, sample_name + \".contigs.fa\") with", "sort -@\" + threads + \" > \" + bam", "thread_asm, presets, assembler, polisher, polish_iterations, ] asm_pa_list.append(asm_pa) k = k", "write out_line = line.replace(\"\\n\", \"\") + \"\\t\" + str(len(reads)) VCF.write(out_line", "extract_reads(subset_fa, read_ids, subset_fa_reorder) # separate reads into multiple files, using", "\" > \" + bam ) try: subprocess.run( command, shell=True,", "files shutil.rmtree(tmp_out_dir) return contig_path_new else: print(\"assembly failed\") return None def", "presets) else: asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets) if", "check_exist(asm_cns): print(\"assembly failed\") return None # run polishing if polish_iterations", "tmp_out_dir = os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir) try: subprocess.call( [ \"flye\", presets_flye,", "] ) except Exception as e: print(e) print(\"Assembly failed, exiting...\")", "pool.close() pool.join() except Exception as e: print(e) print(\"Local assembly failed,", "presets, assembler, polisher, polish_iterations, ] asm_pa_list.append(asm_pa) k = k +", "+ reads + \" | samtools sort -@\" + threads", "return merged_contigs, assembly_passed_loci def run_assembly_polishing(args): reads = args[0] asm_dir =", "polisher, contig_dir, vcf_parsed, out, sample_name, bam, raw_reads, thread, presets, polish_iterations,", "+ \".id\") vcf_parsed_new = vcf_parsed + \".new\" with open(vcf_parsed, \"r\")", "sv_reads, \"-fo\", prefix, ], timeout=300, ) except subprocess.TimeoutExpired: print(\"fail to", "with open(read_ids_unique, \"w\") as output: subprocess.call(command, stdout=output, shell=True) # filter", "= out + \"/\" + sample_name + \".subset.reorder.fa\" extract_reads(subset_fa, read_ids,", "shutil import time import logging from Bio import SeqIO from", "reads to contig: \" + asm_cns) return # run wtpoa-cns", "\"flye\", presets_flye, sv_reads, \"--out-dir\", tmp_out_dir, \"--thread\", str(thread), \"--iterations\", \"0\", ]", "except subprocess.TimeoutExpired: print(\"fail to polish contig: \" + asm_cns) return", "\" + csplit_prefix + \" -n 1 \" + subset_fa_reorder", "vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type=\"sv\" ): \"\"\"Prepare reads", "asm_cns + \" \" + reads + \" | samtools", "= str(min(thread, 4)) consensus = prefix + \".cns.fa\" try: subprocess.run(", "k = 1 with open(vcf_parsed, \"r\") as input: for line", "os.rename(contig_path, contig_path_new) # remove tmp files shutil.rmtree(tmp_out_dir) return contig_path_new else:", "e: print(e) print(\"Prepare local assembly input data failed, exiting...\") sys.exit(1)", "generate unique ID list read_ids_unique = read_ids + \".unique\" command", "contig_name + \".cns.fa\") if check_exist(contig_path): os.rename(contig_path, contig_path_new) # remove tmp", "else: asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets) if not", "if assembler == \"wtdbg2\": asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread,", "asm_pa = [ sv_reads_rename, contig_dir, contig_name, thread_asm, presets, assembler, polisher,", "= sv_reads_dir + \"/\" + contig_name + \".reads.fa\" 
os.rename(sv_reads, sv_reads_rename)", ") except subprocess.TimeoutExpired: print(\"fail to map reads to contig: \"", "sv_reads_rename = sv_reads_dir + \"/\" + contig_name + \".reads.fa\" os.rename(sv_reads,", "polish consensus threads = str(min(threads, 4)) bam = asm_cns +", "reads into multiple files, using csplit mkdir(reads_dir) csplit_prefix = reads_dir", "-i - -fo \" + cns_tmp ) try: subprocess.run( command,", "check_exist(asm_cns): return asm_cns else: print(\"polishing failed for \" + asm_cns", "timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, ) except subprocess.TimeoutExpired: print(\"fail to polish contig:", "what this does # extract read IDs read_ids = os.path.join(out,", "import os import subprocess import shutil import time import logging", "\" + index ) subprocess.call(command, shell=True) # remove tmp files", "== \"wtdbg2\": asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets) else:", "except subprocess.TimeoutExpired: print(\"fail to map reads to contig: \" +", "\" \".join(str(i) for i in m) command = ( \"csplit", "coverage = 0 for read in samfile.fetch(ins_chr, start, end): reads.add(read.query_name)", "Bio import SeqIO from multiprocessing import Pool import pysam from", "+ subset_fa_reorder + \" \" + index ) subprocess.call(command, shell=True)", "k = 0 while True: # align reads to contigs", "input, open(read_ids, \"w\") as output, open( vcf_parsed_new, \"w\" ) as", "presets) if not check_exist(asm_cns): print(\"assembly failed\") return None # run", "+ \"\\n\") return None def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):", "asm_dir, contig_name, thread, presets): \"\"\"Run wtdbg2 assembly\"\"\" if presets ==", "reads: output.write(read + \"\\n\") # write out_line = line.replace(\"\\n\", \"\")", "open(read_ids, \"w\") as output: for line in input: entry =", "+ asm_cns) return if check_exist(cns_tmp): os.rename(cns_tmp, asm_cns) os.remove(bam) else: break", "= os.path.join(tmp_out_dir, \"assembly.fasta\") contig_path_new = os.path.join(asm_dir, contig_name + \".cns.fa\") if", "SeqIO.index(reads, \"fasta\") with open(out, \"wb\") as output_handle, open(list, \"r\") as", "local assembly input data failed, exiting...\") sys.exit(1) mkdir(contig_dir) k =", "= line.replace(\"\\n\", \"\").split(\"\\t\") if read_type == \"sv\": k = k", "reads window = 1000 samfile = pysam.AlignmentFile(bam, \"rb\") read_ids =", "print(\"assembly failed\") return None def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):", "TODO: figure out what this does # extract read IDs", "asm_cns, presets_flye, reads, \"--out-dir\", tmp_out_dir, \"--thread\", str(thread), \"--iterations\", str(polish_iterations), ]", "m.append(k) if len(m) == 1: subprocess.call([\"cp\", subset_fa_reorder, reads_dir + \"/contig0\"])", "1 \" + subset_fa_reorder + \" \" + index )", "+ csplit_prefix + \" -n 1 \" + subset_fa_reorder +", "\".fasta\" ) if check_exist(polished_contig): os.rename(polished_contig, asm_cns) shutil.rmtree(tmp_out_dir) return asm_cns else:", "= os.path.join(out, sample_name + \".contigs.fa\") with open(merged_contigs, \"w\") as merged_output_handle:", "+ bam ) try: subprocess.run( command, shell=True, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT,", "= asm_cns + \".bam\" k = 0 while True: #", "cns_tmp = asm_cns + \".tmp\" command = ( \"samtools view", "os.path.join(out, sample_name + \".subset.fa\") command = \"seqtk subseq \" +", "in \" + format_time(proc_time)) return merged_contigs, 
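# Note: get_local_contigs assumes prep_assembly_inputs (defined below) has
# already split the relevant reads into <sv_reads_dir>/contig0, contig1, ...
# in the same order as the lines of vcf_parsed; each locus is then assembled
# independently, which is what makes the Pool.map parallelism safe.
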
def run_assembly_polishing(args):
    reads = args[0]
    asm_dir = args[1]
    contig_name = args[2]
    thread = args[3]
    presets = args[4]
    assembler = args[5]
    polisher = args[6]
    polish_iterations = args[7]

    # run assembly
    if assembler == "wtdbg2":
        asm_cns = run_wtdbg2_assembly(reads, asm_dir, contig_name, thread, presets)
    else:
        asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread, presets)

    if not check_exist(asm_cns):
        print("assembly failed")
        return None

    # run polishing
    if polish_iterations > 0:
        if polisher == "wtdbg2":
            asm_cns = run_wtdbg2_polishing(
                asm_cns, reads, thread, polish_iterations, presets
            )
        else:
            asm_cns = run_flye_polishing(
                asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
            )

    if check_exist(asm_cns):
        return asm_cns
    else:
        return None


def run_flye_polishing(
    asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
    """Run Flye polishing"""
    if presets == "pacbio":
        presets_flye = "--pacbio-raw"
    else:
        presets_flye = "--nano-raw"

    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    try:
        subprocess.call(
            [
                "flye",
                "--polish-target",
                asm_cns,
                presets_flye,
                reads,
                "--out-dir",
                tmp_out_dir,
                "--thread",
                str(thread),
                "--iterations",
                str(polish_iterations),
            ]
        )
    except Exception as e:
        print(e)
        print("Polishing failed, exiting...")
        return None

    # rename contig file
    polished_contig = os.path.join(
        tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
    )
    if check_exist(polished_contig):
        os.rename(polished_contig, asm_cns)
        shutil.rmtree(tmp_out_dir)
        return asm_cns
    else:
        return None

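# run_assembly_polishing is the Pool.map worker, so it takes a single flat
# argument list (see asm_pa_list above) rather than keyword arguments; the
# assembler and polisher choices are independent, e.g. a wtdbg2 assembly can
# be polished with Flye.
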
+ \" | samtools sort -@\" +", "subset_fa_reorder, reads_dir + \"/contig0\"]) elif len(m) == 0: print(\"No insertion", "= sv_reads_dir + \"/contig\" + str(k) sv_reads_rename = sv_reads_dir +", "stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, ) except subprocess.TimeoutExpired: print(\"fail to map reads to", "timeout=300, ) except subprocess.TimeoutExpired: print(\"fail to assemble contig: \" +", "remove tmp files os.remove(read_ids) os.remove(read_ids_unique) os.remove(subset_fa) os.remove(subset_fa_reorder) def extract_reads(reads, list,", "else: m = m[:-1] index = \" \".join(str(i) for i", "ins_breakpoint - window end = ins_breakpoint + window reads =", "] ) except Exception as e: print(e) print(\"Polishing failed, exiting...\")", "Pool(processes=thread) contig_list = pool.map(run_assembly_polishing, asm_pa_list) pool.close() pool.join() except Exception as", "assembly, filter for cigar reads window = 1000 samfile =", "except subprocess.TimeoutExpired: print(\"fail to build contig layout for contig: \"", "# write out_line = line.replace(\"\\n\", \"\") + \"\\t\" + str(len(reads))", "- window end = ins_breakpoint + window reads = set()", "= 1 with open(vcf_parsed, \"r\") as input: for line in", "+ asm_cns) return # run wtpoa-cns to get polished contig", "m) command = ( \"csplit -s -f \" + csplit_prefix", "ID list\"\"\" record_dict = SeqIO.index(reads, \"fasta\") with open(out, \"wb\") as", "print(\"fail to map reads to contig: \" + asm_cns) return", "\"assembly.fasta\") contig_path_new = os.path.join(asm_dir, contig_name + \".cns.fa\") if check_exist(contig_path): os.rename(contig_path,", "\"map-ont\" # polish consensus threads = str(min(threads, 4)) bam =", "\"r\") as input, open(read_ids, \"w\") as output, open( vcf_parsed_new, \"w\"", "presets == \"pacbio\": presets_wtdbg2 = \"rs\" else: presets_wtdbg2 = \"ont\"", "presets_minimap2 = \"map-pb\" else: presets_minimap2 = \"map-ont\" # polish consensus", "= \"len=\" + str(len(record.seq)) SeqIO.write(record, merged_output_handle, \"fasta\") with open(parsed_contig, \"w\")", "\" | sort | uniq\" with open(read_ids_unique, \"w\") as output:", "contig_name, thread, presets): \"\"\"Run wtdbg2 assembly\"\"\" if presets == \"pacbio\":", "= args[3] presets = args[4] assembler = args[5] polisher =", "consensus_rename = os.path.join(asm_dir, contig_name + \".cns.fa\") os.rename(consensus, consensus_rename) return consensus_rename", "exiting...\") return None # rename contig file polished_contig = os.path.join(", "== 0: print(\"No insertion detected, exiting...\") else: m = m[:-1]", "\"--polish-target\", asm_cns, presets_flye, reads, \"--out-dir\", tmp_out_dir, \"--thread\", str(thread), \"--iterations\", str(polish_iterations),", "if presets == \"pacbio\": presets_flye = \"--pacbio-raw\" else: presets_flye =", "contig_name, thread, presets): \"\"\"Run Flye assembly\"\"\" if presets == \"pacbio\":", "-@\" + threads + \" > \" + bam )", "+ \" \" + reads + \" | samtools sort", "subset_fa_reorder = out + \"/\" + sample_name + \".subset.reorder.fa\" extract_reads(subset_fa,", "read_ids = os.path.join(out, sample_name + \".id\") vcf_parsed_new = vcf_parsed +", "= line.replace(\"\\n\", \"\") + \"\\t\" + str(len(reads)) VCF.write(out_line + \"\\n\")", "input, open(read_ids, \"w\") as output: for line in input: entry", "\"wtpoa-cns\", \"-q\", \"-t\", cns_thread, \"-i\", contig_layout, \"-fo\", consensus, ], timeout=300,", "out, sample_name, bam, raw_reads, reads_dir, read_type=\"sv\" ): \"\"\"Prepare reads for", "list subset_fa = os.path.join(out, 
sample_name + \".subset.fa\") command = \"seqtk", "presets ): \"\"\"Run Flye polishing\"\"\" if presets == \"pacbio\": presets_flye", "polishing\"\"\" if presets == \"pacbio\": presets_flye = \"--pacbio-raw\" else: presets_flye", "align reads to contigs command = ( \"minimap2 -t \"", "input: entry = line.replace(\"\\n\", \"\").split(\"\\t\") read_list = entry[8].split(\",\") for read", "reads, threads, polish_iterations, presets): \"\"\"Run wtdbg2 polishing\"\"\" if presets ==", "presets ) if check_exist(asm_cns): return asm_cns else: return None def", "mkdir(tmp_out_dir) try: subprocess.call( [ \"flye\", presets_flye, sv_reads, \"--out-dir\", tmp_out_dir, \"--thread\",", "assembly\"\"\" if presets == \"pacbio\": presets_wtdbg2 = \"rs\" else: presets_wtdbg2", "str(thread), \"-i\", sv_reads, \"-fo\", prefix, ], timeout=300, ) except subprocess.TimeoutExpired:", "subprocess import shutil import time import logging from Bio import", "consensus contig_layout = prefix + \".ctg.lay.gz\" if check_exist(contig_layout): cns_thread =", "does # extract read IDs read_ids = os.path.join(out, sample_name +", "# merge all contigs assembly_passed_loci = set() merged_contigs = os.path.join(out,", "return # rename contigs contig_path = os.path.join(tmp_out_dir, \"assembly.fasta\") contig_path_new =", "command = ( \"csplit -s -f \" + csplit_prefix +", "contig_name, thread, presets) else: asm_cns = run_flye_assembly(reads, asm_dir, contig_name, thread,", "stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, ) except subprocess.TimeoutExpired: print(\"fail to polish contig: \"", "): \"\"\"Perform local assembly using reads from parsed VCF file", "file in parallel\"\"\" # Prepare reads used for local assembly", "os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir) try: subprocess.call( [ \"flye\", \"--polish-target\", asm_cns, presets_flye,", "start_time = time.time() try: pool = Pool(processes=thread) contig_list = pool.map(run_assembly_polishing,", "asm_cns else: print(\"polishing failed for \" + asm_cns + \"\\n\")", "\".contigs.fa\") with open(merged_contigs, \"w\") as merged_output_handle: for contig in contig_list:", "\"--thread\", str(thread), \"--iterations\", str(polish_iterations), ] ) except Exception as e:", "asm_dir, contig_name, thread, presets) if not check_exist(asm_cns): print(\"assembly failed\") return", "open(out, \"wb\") as output_handle, open(list, \"r\") as ID: for entry", "print(\"fail to build contig layout for contig: \" + contig_name)", "subprocess.TimeoutExpired: print(\"fail to assemble contig: \" + contig_name) return None", "run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets): \"\"\"Run wtdbg2 polishing\"\"\" if presets", "\"r\") as input, open(read_ids, \"w\") as output: for line in", "asm_cns = run_wtdbg2_polishing( asm_cns, reads, thread, polish_iterations, presets ) else:", "\"\\n\") return None def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets): \"\"\"Run", "open(subset_fa, \"w\") as output: subprocess.call(command, stdout=output, shell=True) # reorder reads", "prep_assembly_inputs( vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type=\"sv\" ): \"\"\"Prepare", "str(polish_iterations), ] ) except Exception as e: print(e) print(\"Polishing failed,", "\"map-pb\" else: presets_minimap2 = \"map-ont\" # polish consensus threads =", "# Prepare reads used for local assembly and polishing sv_reads_dir", "+ \" -i - -fo \" + cns_tmp ) try:", "-t \" + threads + \" -ax \" + presets_minimap2", "reads for local assembly\") if read_type 
== \"sv\": # TODO:", "\"seqtk subseq \" + raw_reads + \" \" + read_ids_unique", "data failed, exiting...\") sys.exit(1) mkdir(contig_dir) k = 0 asm_pa_list =", "line.replace(\"\\n\", \"\") + \"\\t\" + str(len(reads)) VCF.write(out_line + \"\\n\") vcf_parsed", "subprocess.TimeoutExpired: print(\"fail to polish contig: \" + asm_cns) return if", "+ 2 * (len(entry[8].split(\",\"))) else: k = k + 2", "separate reads into multiple files, using csplit mkdir(reads_dir) csplit_prefix =", "in parallel logging.info(\"Perform local assembly of non-reference TE loci...\") start_time", "reads, \"--out-dir\", tmp_out_dir, \"--thread\", str(thread), \"--iterations\", str(polish_iterations), ] ) except", "samtools sort -@\" + threads + \" > \" +", "+ \".contigs.fa\") with open(merged_contigs, \"w\") as merged_output_handle: for contig in", "subprocess.call([\"cp\", subset_fa_reorder, reads_dir + \"/contig0\"]) elif len(m) == 0: print(\"No", "bam ) try: subprocess.run( command, shell=True, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, )", ") except Exception as e: print(e) print(\"Prepare local assembly input", "for line in input: entry = line.replace(\"\\n\", \"\").split(\"\\t\") # get", "reads from fasta using read ID list\"\"\" record_dict = SeqIO.index(reads,", "\"\") + \"\\t\" + str(len(reads)) VCF.write(out_line + \"\\n\") vcf_parsed =", "\"\").split(\"\\t\") if read_type == \"sv\": k = k + 2", "+ \".subset.reorder.fa\" extract_reads(subset_fa, read_ids, subset_fa_reorder) # separate reads into multiple", "asm_dir, contig_name, thread, presets): \"\"\"Run Flye assembly\"\"\" if presets ==", "return except Exception as e: print(e) print(\"wtdbg2 failed, exiting...\") return", "presets_minimap2 = \"map-ont\" # polish consensus threads = str(min(threads, 4))", "output: subprocess.call(command, stdout=output, shell=True) # filter raw reads using read", "str(min(thread, 4)) consensus = prefix + \".cns.fa\" try: subprocess.run( [", "polish_iterations > 0: if polisher == \"wtdbg2\": asm_cns = run_wtdbg2_polishing(", "\"minimap2 -t \" + threads + \" -ax \" +", "Prepare reads used for local assembly and polishing sv_reads_dir =", "\"\"\"Run Flye assembly\"\"\" if presets == \"pacbio\": presets_flye = \"--pacbio-raw\"", "args[7] # run assembly if assembler == \"wtdbg2\": asm_cns =", "stdout=output, shell=True) # filter raw reads using read list subset_fa", ") if check_exist(asm_cns): return asm_cns else: return None def run_flye_polishing(", "import mkdir, check_exist, format_time def get_local_contigs( assembler, polisher, contig_dir, vcf_parsed,", "presets, polish_iterations, ): \"\"\"Perform local assembly using reads from parsed", "rename contigs contig_path = os.path.join(tmp_out_dir, \"assembly.fasta\") contig_path_new = os.path.join(asm_dir, contig_name", "with open(vcf_parsed, \"r\") as input, open(read_ids, \"w\") as output, open(", "= ( \"samtools view -F0x900 \" + bam + \"", "pool.map(run_assembly_polishing, asm_pa_list) pool.close() pool.join() except Exception as e: print(e) print(\"Local", "+ \".cns.fa\") os.rename(consensus, consensus_rename) return consensus_rename else: return None def", "IDs read_ids = os.path.join(out, sample_name + \".id\") with open(vcf_parsed, \"r\")", "\".join(str(i) for i in m) command = ( \"csplit -s", "read_list = entry[8].split(\",\") for read in read_list: output.write(read + \"\\n\")", "for contig: \" + contig_name) return except Exception as e:", "os.path.join(asm_dir, contig_name + \".cns.fa\") if check_exist(contig_path): 
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Run Flye assembly"""
    if presets == "pacbio":
        presets_flye = "--pacbio-raw"
    else:
        presets_flye = "--nano-raw"

    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    try:
        subprocess.call(
            [
                "flye",
                presets_flye,
                sv_reads,
                "--out-dir",
                tmp_out_dir,
                "--thread",
                str(thread),
                "--iterations",
                "0",
            ]
        )
    except Exception as e:
        print(e)
        print("Assembly failed, exiting...")
        return

    # rename contigs
    contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
    contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
    if check_exist(contig_path):
        os.rename(contig_path, contig_path_new)
        # remove tmp files
        shutil.rmtree(tmp_out_dir)
        return contig_path_new
    else:
        print("assembly failed")
        return None

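# "--iterations 0" asks Flye to skip its built-in polishing stage (the -i /
# --iterations flag controls the number of polishing rounds), leaving
# polishing to the dedicated functions above so that the number of rounds
# and the polishing tool stay configurable.
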
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Run wtdbg2 assembly"""
    if presets == "pacbio":
        presets_wtdbg2 = "rs"
    else:
        presets_wtdbg2 = "ont"
    prefix = sv_reads.replace(".reads.fa", "")
    try:
        subprocess.run(
            [
                "wtdbg2",
                "-x",
                presets_wtdbg2,
                "-q",
                "-AS",
                "1",
                "-g",
                "30k",
                "-t",
                str(thread),
                "-i",
                sv_reads,
                "-fo",
                prefix,
            ],
            timeout=300,
        )
    except subprocess.TimeoutExpired:
        print("fail to build contig layout for contig: " + contig_name)
        return
    except Exception as e:
        print(e)
        print("wtdbg2 failed, exiting...")
        return None

    # derive consensus
    contig_layout = prefix + ".ctg.lay.gz"
    if not check_exist(contig_layout):
        return None
    cns_thread = str(min(thread, 4))
    consensus = prefix + ".cns.fa"
    try:
        subprocess.run(
            [
                "wtpoa-cns",
                "-q",
                "-t",
                cns_thread,
                "-i",
                contig_layout,
                "-fo",
                consensus,
            ],
            timeout=300,
        )
    except subprocess.TimeoutExpired:
        print("fail to assemble contig: " + contig_name)
        return None
    if check_exist(consensus):
        consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
        os.rename(consensus, consensus_rename)
        return consensus_rename
    else:
        return None

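# wtdbg2 emits a contig layout (<prefix>.ctg.lay.gz) rather than a final
# fasta; its companion tool wtpoa-cns derives the consensus sequence from
# that layout, which is why assembly here is a two-step invocation.
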
def prep_assembly_inputs(
    vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
    """Prepare reads for local assembly"""
    # logging.info("Prepare reads for local assembly")
    if read_type == "sv":
        # TODO: figure out what this does
        # extract read IDs
        read_ids = os.path.join(out, sample_name + ".id")
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                read_list = entry[8].split(",")
                for read in read_list:
                    output.write(read + "\n")
    else:
        # TODO: think about using this for assembly, filter for cigar reads
        window = 1000
        samfile = pysam.AlignmentFile(bam, "rb")
        read_ids = os.path.join(out, sample_name + ".id")
        vcf_parsed_new = vcf_parsed + ".new"
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
            vcf_parsed_new, "w"
        ) as VCF:
            for line in input:
                entry = line.replace("\n", "").split("\t")

                # get sniffles read list
                read_list = entry[8].split(",")
                reads_sniffles = set(read_list)

                ins_chr = entry[0]
                ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
                start = ins_breakpoint - window
                end = ins_breakpoint + window
                reads = set()
                # coverage = 0
                for read in samfile.fetch(ins_chr, start, end):
                    reads.add(read.query_name)
                for read in reads:
                    output.write(read + "\n")

                # write
                out_line = line.replace("\n", "") + "\t" + str(len(reads))
                VCF.write(out_line + "\n")
        vcf_parsed = vcf_parsed_new

    # generate unique ID list
    read_ids_unique = read_ids + ".unique"
    command = "cat " + read_ids + " | sort | uniq"
    with open(read_ids_unique, "w") as output:
        subprocess.call(command, stdout=output, shell=True)

    # filter raw reads using read list
    subset_fa = os.path.join(out, sample_name + ".subset.fa")
    command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
    with open(subset_fa, "w") as output:
        subprocess.call(command, stdout=output, shell=True)

    # reorder reads
    subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
    extract_reads(subset_fa, read_ids, subset_fa_reorder)

    # separate reads into multiple files, using csplit
    mkdir(reads_dir)
    csplit_prefix = reads_dir + "/contig"
    m = []
    k = 1
    with open(vcf_parsed, "r") as input:
        for line in input:
            entry = line.replace("\n", "").split("\t")
            if read_type == "sv":
                k = k + 2 * (len(entry[8].split(",")))
            else:
                k = k + 2 * int(entry[14])
            m.append(k)
    if len(m) == 1:
        subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
    elif len(m) == 0:
        print("No insertion detected, exiting...")
    else:
        m = m[:-1]
        index = " ".join(str(i) for i in m)
        command = (
            "csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
        )
        subprocess.call(command, shell=True)

    # remove tmp files
    os.remove(read_ids)
    os.remove(read_ids_unique)
    os.remove(subset_fa)
    os.remove(subset_fa_reorder)

polish_iterations, presets ) if", "\" + asm_cns + \"\\n\") return None def run_flye_assembly(sv_reads, asm_dir,", "\"--iterations\", str(polish_iterations), ] ) except Exception as e: print(e) print(\"Polishing", "import time import logging from Bio import SeqIO from multiprocessing", "failed, exiting...\") return None # rename contig file polished_contig =", "\"-fo\", prefix, ], timeout=300, ) except subprocess.TimeoutExpired: print(\"fail to build", "= set() merged_contigs = os.path.join(out, sample_name + \".contigs.fa\") with open(merged_contigs,", "os.remove(bam) else: break k = k + 1 if k", "line.replace(\"\\n\", \"\").split(\"\\t\") # get sniffles read list read_list = entry[8].split(\",\")", "os.rename(consensus, consensus_rename) return consensus_rename else: return None def prep_assembly_inputs( vcf_parsed,", "run_assembly_polishing(args): reads = args[0] asm_dir = args[1] contig_name = args[2]", "map reads to contig: \" + asm_cns) return # run", "\" -d \" + asm_cns + \" -i - -fo", "\"\\t\" + str(len(reads)) VCF.write(out_line + \"\\n\") vcf_parsed = vcf_parsed_new #", "\" \" + reads + \" | samtools sort -@\"", "assembler, polisher, contig_dir, vcf_parsed, out, sample_name, bam, raw_reads, thread, presets,", "len(m) == 1: subprocess.call([\"cp\", subset_fa_reorder, reads_dir + \"/contig0\"]) elif len(m)", "raw_reads, thread, presets, polish_iterations, ): \"\"\"Perform local assembly using reads", "+ \" | samtools sort -@\" + threads + \"", "subset_fa_reorder + \" \" + index ) subprocess.call(command, shell=True) #", "i in m) command = ( \"csplit -s -f \"", "reads using read list subset_fa = os.path.join(out, sample_name + \".subset.fa\")", "os.rename(polished_contig, asm_cns) shutil.rmtree(tmp_out_dir) return asm_cns else: return None def run_wtdbg2_polishing(asm_cns,", "logging.info(\"Perform local assembly of non-reference TE loci...\") start_time = time.time()", "failed for \" + asm_cns + \"\\n\") return None def", "contig_name + \".cns.ctg1.fa\") with open(contig, \"r\") as input: records =", "bam, raw_reads, sv_reads_dir, read_type=\"sv\" ) except Exception as e: print(e)", "thread, polish_iterations, presets ) if check_exist(asm_cns): return asm_cns else: return", "sv_reads = sv_reads_dir + \"/contig\" + str(k) sv_reads_rename = sv_reads_dir", "k + 2 * int(entry[14]) m.append(k) if len(m) == 1:", "get_local_contigs( assembler, polisher, contig_dir, vcf_parsed, out, sample_name, bam, raw_reads, thread,", "print(e) print(\"Local assembly failed, exiting...\") sys.exit(1) proc_time = time.time() -", "contig_layout, \"-fo\", consensus, ], timeout=300, ) except subprocess.TimeoutExpired: print(\"fail to", "sys.exit(1) mkdir(contig_dir) k = 0 asm_pa_list = [] with open(vcf_parsed,", "| wtpoa-cns -t \" + threads + \" -d \"", "window = 1000 samfile = pysam.AlignmentFile(bam, \"rb\") read_ids = os.path.join(out,", "multiprocessing import Pool import pysam from telr.TELR_utility import mkdir, check_exist,", "\"--pacbio-raw\" else: presets_flye = \"--nano-raw\" tmp_out_dir = os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir)", "if polish_iterations > 0: if polisher == \"wtdbg2\": asm_cns =", "1000 samfile = pysam.AlignmentFile(bam, \"rb\") read_ids = os.path.join(out, sample_name +", "as e: print(e) print(\"wtdbg2 failed, exiting...\") return None # derive", "= k + 2 * (len(entry[8].split(\",\"))) else: k = k", "to build contig layout for contig: \" + contig_name) return", "None def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets): \"\"\"Run Flye assembly\"\"\"", 
"\" + cns_tmp ) try: subprocess.run( command, shell=True, timeout=300, stdout=subprocess.DEVNULL,", "= args[4] assembler = args[5] polisher = args[6] polish_iterations =", "\".cns.ctg1.fa\") with open(contig, \"r\") as input: records = SeqIO.parse(input, \"fasta\")", "exiting...\") return # rename contigs contig_path = os.path.join(tmp_out_dir, \"assembly.fasta\") contig_path_new", "asm_cns) return if check_exist(cns_tmp): os.rename(cns_tmp, asm_cns) os.remove(bam) else: break k", "\"/\" + sample_name + \".subset.reorder.fa\" extract_reads(subset_fa, read_ids, subset_fa_reorder) # separate", "return None def prep_assembly_inputs( vcf_parsed, out, sample_name, bam, raw_reads, reads_dir,", "= \"--pacbio-raw\" else: presets_flye = \"--nano-raw\" tmp_out_dir = os.path.join(asm_dir, contig_name)", "logging.info(\"Prepare reads for local assembly\") if read_type == \"sv\": #", "os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir) try: subprocess.call( [ \"flye\", presets_flye, sv_reads, \"--out-dir\",", "as input, open(read_ids, \"w\") as output, open( vcf_parsed_new, \"w\" )", "entry[8].split(\",\") reads_sniffles = set(read_list) ins_chr = entry[0] ins_breakpoint = round((int(entry[1])", "+ sample_name + \".subset.reorder.fa\" extract_reads(subset_fa, read_ids, subset_fa_reorder) # separate reads", "else: presets_wtdbg2 = \"ont\" prefix = sv_reads.replace(\".reads.fa\", \"\") try: subprocess.run(", "\"--iterations\", \"0\", ] ) except Exception as e: print(e) print(\"Assembly", "input: entry = line.replace(\"\\n\", \"\").split(\"\\t\") contig_name = \"_\".join([entry[0], entry[1], entry[2]])", "consensus = prefix + \".cns.fa\" try: subprocess.run( [ \"wtpoa-cns\", \"-q\",", "= \"_\".join([entry[0], entry[1], entry[2]]) # rename variant reads sv_reads =", "\"w\") as merged_output_handle: for contig in contig_list: if check_exist(contig): contig_name", "4)) consensus = prefix + \".cns.fa\" try: subprocess.run( [ \"wtpoa-cns\",", "import Pool import pysam from telr.TELR_utility import mkdir, check_exist, format_time", "\"\"\"Perform local assembly using reads from parsed VCF file in", "subprocess.run( [ \"wtpoa-cns\", \"-q\", \"-t\", cns_thread, \"-i\", contig_layout, \"-fo\", consensus,", "\" + asm_cns) return # run wtpoa-cns to get polished", "if read_type == \"sv\": k = k + 2 *", "print(\"Assembly failed, exiting...\") return # rename contigs contig_path = os.path.join(tmp_out_dir,", ") else: asm_cns = run_flye_polishing( asm_cns, reads, asm_dir, contig_name, thread,", "os.path.join( tmp_out_dir, \"polished_\" + str(polish_iterations) + \".fasta\" ) if check_exist(polished_contig):", "contigs command = ( \"minimap2 -t \" + threads +", "contig_name) return None if check_exist(consensus): consensus_rename = os.path.join(asm_dir, contig_name +", "import logging from Bio import SeqIO from multiprocessing import Pool", "+ \" -n 1 \" + subset_fa_reorder + \" \"", "check_exist(contig_path): os.rename(contig_path, contig_path_new) # remove tmp files shutil.rmtree(tmp_out_dir) return contig_path_new", "SeqIO.write(record, merged_output_handle, \"fasta\") with open(parsed_contig, \"w\") as parsed_output_handle: SeqIO.write(record, parsed_output_handle,", "args[0] asm_dir = args[1] contig_name = args[2] thread = args[3]", "if check_exist(contig): contig_name = os.path.basename(contig).replace(\".cns.fa\", \"\") assembly_passed_loci.add(contig_name) parsed_contig = os.path.join(contig_dir,", "entry[0] ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2) start =", "\".new\" with open(vcf_parsed, 
\"r\") as input, open(read_ids, \"w\") as output,", "\" + threads + \" -d \" + asm_cns +", "rename variant reads sv_reads = sv_reads_dir + \"/contig\" + str(k)", "reads sv_reads = sv_reads_dir + \"/contig\" + str(k) sv_reads_rename =", "prefix, ], timeout=300, ) except subprocess.TimeoutExpired: print(\"fail to build contig", "read in reads: output.write(read + \"\\n\") # write out_line =", "consensus threads = str(min(threads, 4)) bam = asm_cns + \".bam\"", "\"--out-dir\", tmp_out_dir, \"--thread\", str(thread), \"--iterations\", \"0\", ] ) except Exception", "check_exist(polished_contig): os.rename(polished_contig, asm_cns) shutil.rmtree(tmp_out_dir) return asm_cns else: return None def", "= \"--nano-raw\" tmp_out_dir = os.path.join(asm_dir, contig_name) mkdir(tmp_out_dir) try: subprocess.call( [", "print(\"Polishing failed, exiting...\") return None # rename contig file polished_contig", "subprocess.call( [ \"flye\", \"--polish-target\", asm_cns, presets_flye, reads, \"--out-dir\", tmp_out_dir, \"--thread\",", "\" -r2k \" + asm_cns + \" \" + reads", "\" \" + index ) subprocess.call(command, shell=True) # remove tmp", "\"wtdbg2\", \"-x\", presets_wtdbg2, \"-q\", \"-AS\", \"1\", \"-g\", \"30k\", \"-t\", str(thread),", "tmp files os.remove(read_ids) os.remove(read_ids_unique) os.remove(subset_fa) os.remove(subset_fa_reorder) def extract_reads(reads, list, out):", "list\"\"\" record_dict = SeqIO.index(reads, \"fasta\") with open(out, \"wb\") as output_handle,", "as input: for line in input: entry = line.replace(\"\\n\", \"\").split(\"\\t\")", "unique ID list read_ids_unique = read_ids + \".unique\" command =", "except subprocess.TimeoutExpired: print(\"fail to assemble contig: \" + contig_name) return", "\"w\") as output, open( vcf_parsed_new, \"w\" ) as VCF: for", "os.path.join(out, \"sv_reads\") try: prep_assembly_inputs( vcf_parsed, out, sample_name, bam, raw_reads, sv_reads_dir,", "out what this does # extract read IDs read_ids =", "\"--thread\", str(thread), \"--iterations\", \"0\", ] ) except Exception as e:", "failed\") return None # run polishing if polish_iterations > 0:", "polish_iterations = args[7] # run assembly if assembler == \"wtdbg2\":", "= pool.map(run_assembly_polishing, asm_pa_list) pool.close() pool.join() except Exception as e: print(e)", "print(\"polishing failed for \" + asm_cns + \"\\n\") return None", "( \"minimap2 -t \" + threads + \" -ax \"", "else: return None def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets): \"\"\"Run", "> \" + bam ) try: subprocess.run( command, shell=True, timeout=300,", "= os.path.join(out, sample_name + \".id\") with open(vcf_parsed, \"r\") as input,", "command, shell=True, timeout=300, stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT, ) except subprocess.TimeoutExpired: print(\"fail to", "view -F0x900 \" + bam + \" | wtpoa-cns -t", "run polishing if polish_iterations > 0: if polisher == \"wtdbg2\":", "subprocess.TimeoutExpired: print(\"fail to build contig layout for contig: \" +", "+ \".ctg.lay.gz\" if check_exist(contig_layout): cns_thread = str(min(thread, 4)) consensus =", "SeqIO.parse(input, \"fasta\") for record in records: if record.id == \"ctg1\"", "print(e) print(\"Polishing failed, exiting...\") return None # rename contig file", "\"-AS\", \"1\", \"-g\", \"30k\", \"-t\", str(thread), \"-i\", sv_reads, \"-fo\", prefix,", "figure out what this does # extract read IDs read_ids", "# derive consensus contig_layout = prefix + \".ctg.lay.gz\" if check_exist(contig_layout):", "return None # run polishing 
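Pulling these pieces together, here is a minimal hedged sketch of how the entry point above might be driven. The module path telr.TELR_assembly, every file path, the sample name, and the parameter values are placeholders for illustration, not values taken from the source.

# Hypothetical driver for get_local_contigs; all paths below are placeholders.
from telr.TELR_assembly import get_local_contigs  # assumed module path

contigs_fa, passed_loci = get_local_contigs(
    assembler="wtdbg2",
    polisher="wtdbg2",
    contig_dir="out/contig_assembly",
    vcf_parsed="out/sample.vcf_parsed",
    out="out",
    sample_name="sample",
    bam="out/sample.sorted.bam",
    raw_reads="sample.reads.fa",
    thread=8,
    presets="ont",
    polish_iterations=2,
)
print(contigs_fa, len(passed_loci))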
def run_flye_polishing(
    asm_cns, reads, asm_dir, contig_name, thread, polish_iterations, presets
):
    """Run Flye polishing"""
    if presets == "pacbio":
        presets_flye = "--pacbio-raw"
    else:
        presets_flye = "--nano-raw"

    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    try:
        subprocess.call(
            [
                "flye",
                "--polish-target",
                asm_cns,
                presets_flye,
                reads,
                "--out-dir",
                tmp_out_dir,
                "--thread",
                str(thread),
                "--iterations",
                str(polish_iterations),
            ]
        )
    except Exception as e:
        print(e)
        print("Polishing failed, exiting...")
        return None

    # rename contig file
    polished_contig = os.path.join(
        tmp_out_dir, "polished_" + str(polish_iterations) + ".fasta"
    )
    if check_exist(polished_contig):
        os.rename(polished_contig, asm_cns)
        shutil.rmtree(tmp_out_dir)
        return asm_cns
    return None
def run_wtdbg2_polishing(asm_cns, reads, threads, polish_iterations, presets):
    """Run wtdbg2 polishing"""
    if presets == "pacbio":
        presets_minimap2 = "map-pb"
    else:
        presets_minimap2 = "map-ont"

    # polish consensus
    threads = str(min(threads, 4))
    bam = asm_cns + ".bam"
    k = 0
    while True:
        # align reads to contigs
        command = (
            "minimap2 -t " + threads + " -ax " + presets_minimap2 + " -r2k "
            + asm_cns + " " + reads
            + " | samtools sort -@" + threads + " > " + bam
        )
        try:
            subprocess.run(
                command,
                shell=True,
                timeout=300,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        except subprocess.TimeoutExpired:
            print("fail to map reads to contig: " + asm_cns)
            return None

        # run wtpoa-cns to get polished contig
        cns_tmp = asm_cns + ".tmp"
        command = (
            "samtools view -F0x900 " + bam + " | wtpoa-cns -t " + threads
            + " -d " + asm_cns + " -i - -fo " + cns_tmp
        )
        try:
            subprocess.run(
                command,
                shell=True,
                timeout=300,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT,
            )
        except subprocess.TimeoutExpired:
            print("fail to polish contig: " + asm_cns)
            return None

        if check_exist(cns_tmp):
            os.rename(cns_tmp, asm_cns)
            os.remove(bam)
        else:
            break
        k = k + 1
        if k >= polish_iterations:
            break

    if check_exist(asm_cns):
        return asm_cns
    print("polishing failed for " + asm_cns + "\n")
    return None
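The samtools -F0x900 filter in the polishing loop drops secondary and supplementary alignments before consensus calling, so wtpoa-cns only ever sees each read's primary placement. A quick sanity check of that flag arithmetic (SAM-spec flag values, not anything project-specific):

# 0x900 is the union of the SAM flags for secondary (0x100) and
# supplementary (0x800) alignments; -F0x900 therefore keeps primary
# records only.
SECONDARY = 0x100
SUPPLEMENTARY = 0x800
assert SECONDARY | SUPPLEMENTARY == 0x900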
def run_flye_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Run Flye assembly"""
    if presets == "pacbio":
        presets_flye = "--pacbio-raw"
    else:
        presets_flye = "--nano-raw"

    tmp_out_dir = os.path.join(asm_dir, contig_name)
    mkdir(tmp_out_dir)
    try:
        subprocess.call(
            [
                "flye",
                presets_flye,
                sv_reads,
                "--out-dir",
                tmp_out_dir,
                "--thread",
                str(thread),
                "--iterations",
                "0",
            ]
        )
    except Exception as e:
        print(e)
        print("Assembly failed, exiting...")
        return None

    # rename contigs
    contig_path = os.path.join(tmp_out_dir, "assembly.fasta")
    contig_path_new = os.path.join(asm_dir, contig_name + ".cns.fa")
    if check_exist(contig_path):
        os.rename(contig_path, contig_path_new)
        # remove tmp files
        shutil.rmtree(tmp_out_dir)
        return contig_path_new
    print("assembly failed")
    return None
def run_wtdbg2_assembly(sv_reads, asm_dir, contig_name, thread, presets):
    """Run wtdbg2 assembly"""
    if presets == "pacbio":
        presets_wtdbg2 = "rs"
    else:
        presets_wtdbg2 = "ont"
    prefix = sv_reads.replace(".reads.fa", "")
    try:
        subprocess.run(
            [
                "wtdbg2",
                "-x",
                presets_wtdbg2,
                "-q",
                "-AS",
                "1",
                "-g",
                "30k",
                "-t",
                str(thread),
                "-i",
                sv_reads,
                "-fo",
                prefix,
            ],
            timeout=300,
        )
    except subprocess.TimeoutExpired:
        print("fail to build contig layout for contig: " + contig_name)
        return None
    except Exception as e:
        print(e)
        print("wtdbg2 failed, exiting...")
        return None

    # derive consensus
    contig_layout = prefix + ".ctg.lay.gz"
    if check_exist(contig_layout):
        cns_thread = str(min(thread, 4))
        consensus = prefix + ".cns.fa"
        try:
            subprocess.run(
                [
                    "wtpoa-cns",
                    "-q",
                    "-t",
                    cns_thread,
                    "-i",
                    contig_layout,
                    "-fo",
                    consensus,
                ],
                timeout=300,
            )
        except subprocess.TimeoutExpired:
            print("fail to assemble contig: " + contig_name)
            return None
        if check_exist(consensus):
            consensus_rename = os.path.join(asm_dir, contig_name + ".cns.fa")
            os.rename(consensus, consensus_rename)
            return consensus_rename
    return None
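This function is self-contained enough to exercise on a single locus. A hedged example invocation follows; the read file, output directory, and locus name are placeholders, not values from the source:

# Hypothetical single-locus call; paths and the locus name are placeholders.
contig = run_wtdbg2_assembly(
    sv_reads="sv_reads/chr1_12345_12400.reads.fa",
    asm_dir="contig_assembly",
    contig_name="chr1_12345_12400",
    thread=1,
    presets="ont",
)
if contig is not None:
    print("consensus written to", contig)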
def prep_assembly_inputs(
    vcf_parsed, out, sample_name, bam, raw_reads, reads_dir, read_type="sv"
):
    """Prepare reads for local assembly"""
    # logging.info("Prepare reads for local assembly")
    if read_type == "sv":  # TODO: figure out what this does
        # extract read IDs
        read_ids = os.path.join(out, sample_name + ".id")
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                read_list = entry[8].split(",")
                for read in read_list:
                    output.write(read + "\n")
    else:  # TODO: think about using this for assembly, filter for cigar reads
        window = 1000
        samfile = pysam.AlignmentFile(bam, "rb")
        read_ids = os.path.join(out, sample_name + ".id")
        vcf_parsed_new = vcf_parsed + ".new"
        with open(vcf_parsed, "r") as input, open(read_ids, "w") as output, open(
            vcf_parsed_new, "w"
        ) as VCF:
            for line in input:
                entry = line.replace("\n", "").split("\t")
                # get sniffles read list
                read_list = entry[8].split(",")
                reads_sniffles = set(read_list)
                ins_chr = entry[0]
                ins_breakpoint = round((int(entry[1]) + int(entry[2])) / 2)
                start = ins_breakpoint - window
                end = ins_breakpoint + window
                reads = set()
                # coverage = 0
                for read in samfile.fetch(ins_chr, start, end):
                    reads.add(read.query_name)
                for read in reads:
                    output.write(read + "\n")
                # write
                out_line = line.replace("\n", "") + "\t" + str(len(reads))
                VCF.write(out_line + "\n")
        vcf_parsed = vcf_parsed_new

    # generate unique ID list
    read_ids_unique = read_ids + ".unique"
    command = "cat " + read_ids + " | sort | uniq"
    with open(read_ids_unique, "w") as output:
        subprocess.call(command, stdout=output, shell=True)

    # filter raw reads using read list
    subset_fa = os.path.join(out, sample_name + ".subset.fa")
    command = "seqtk subseq " + raw_reads + " " + read_ids_unique + " | seqtk seq -a"
    with open(subset_fa, "w") as output:
        subprocess.call(command, stdout=output, shell=True)

    # reorder reads
    subset_fa_reorder = out + "/" + sample_name + ".subset.reorder.fa"
    extract_reads(subset_fa, read_ids, subset_fa_reorder)
    # separate reads into multiple files, using csplit
    mkdir(reads_dir)
    csplit_prefix = reads_dir + "/contig"
    m = []
    k = 1
    with open(vcf_parsed, "r") as input:
        for line in input:
            entry = line.replace("\n", "").split("\t")
            if read_type == "sv":
                k = k + 2 * (len(entry[8].split(",")))
            else:
                k = k + 2 * int(entry[14])
            m.append(k)
    if len(m) == 1:
        subprocess.call(["cp", subset_fa_reorder, reads_dir + "/contig0"])
    elif len(m) == 0:
        print("No insertion detected, exiting...")
    else:
        m = m[:-1]
        index = " ".join(str(i) for i in m)
        command = (
            "csplit -s -f " + csplit_prefix + " -n 1 " + subset_fa_reorder + " " + index
        )
        subprocess.call(command, shell=True)

    # remove tmp files
    os.remove(read_ids)
    os.remove(read_ids_unique)
    os.remove(subset_fa)
    os.remove(subset_fa_reorder)
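The csplit bookkeeping above computes cumulative line offsets into the reordered FASTA: each read occupies two lines (header plus sequence), so a locus supported by n reads spans 2n lines, and csplit needs every boundary except the last. A small worked example of that offset math (toy read counts, not data from the source):

# Toy example: loci supported by 3, 2 and 4 reads give cumulative
# offsets 7, 11 and 19; csplit is passed all boundaries but the final
# one, yielding contig0 (lines 1-6), contig1 (7-10), contig2 (11-end).
read_counts = [3, 2, 4]
k = 1
offsets = []
for n in read_counts:
    k += 2 * n
    offsets.append(k)
print(offsets)        # [7, 11, 19]
print(offsets[:-1])   # [7, 11] -> split points handed to csplit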
def extract_reads(reads, list, out):
    """Extract reads from fasta using read ID list"""
    record_dict = SeqIO.index(reads, "fasta")
    with open(out, "wb") as output_handle, open(list, "r") as ID:
        for entry in ID:
            entry = entry.replace("\n", "")
            output_handle.write(record_dict.get_raw(entry))
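Bio.SeqIO.index builds a lookup over the FASTA without loading it into memory, and get_raw returns each record's verbatim bytes, so the output order follows the ID list rather than the input file; that ordering is exactly what prep_assembly_inputs relies on to place each locus's reads in a contiguous block before csplit. A usage sketch with placeholder file names:

# Placeholder file names; writes reads in the order listed in sample.id.
extract_reads("sample.subset.fa", "sample.id", "sample.subset.reorder.fa")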
[ "lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): \"\"\"", "pylint: disable=pointless-statement ses.create(\"session1\") assert ses.session_pointer is not None ses.destroy() with", "GMTTempFile() as outfile: lib.call_module(\"info\", \"{} -C ->{}\".format(vfile, outfile.name)) output =", "It's hard to make the C API function fail without", "with clib.Session() as lib: assert lib.get_default(\"API_GRID_LAYOUT\") in [\"rows\", \"columns\"] assert", "with mock(ses, \"GMT_Get_Default\", mock_func=mock_defaults): # Check for an empty dictionary", "we don't open a file that # we won't close", "extract region fails if nothing has been plotted. \"\"\" Figure()", "with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, \"GMT_Create_Data\", returns=None):", "this code\") # Test the status check when closing the", "20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim():", "str(error) def test_method_no_session(): \"\"\" Fails when not in a session.", "= outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in", "expected = \"<matrix memory>: N = {}\\t{}\\n\".format(shape[0], bounds) assert output", "value. Used to test that exceptions are raised when API", "\"GMT_IS_POINT\", \"GMT_IS_GRID\", # The invalid direction argument 0, ) with", "# Check for an empty dictionary assert ses.info for key", "= b\"bla\" return 0 lib = clib.Session() with mock(lib, \"GMT_Get_Default\",", "matrix columns to virtual file dataset. \"\"\" dtypes = \"float32", "\"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for i in (x, y, z)] )", "parsed == lib[family] def test_parse_constant_composite(): \"\"\" Parsing a composite constant", "def test_write_data_fails(): \"\"\" Check that write data raises an exception", "N = {}\\t{}\\n\".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice():", "bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(), col.max()) for col in data.T] )", "return codes. \"\"\" # It's hard to make the C", "20, 1, 0], ) def test_create_data_grid_range(): \"\"\" Create a grid", "0 (success) so that we don't open a file that", "that we don't open a file that # we won't", "lib: assert lib.get_default(\"API_GRID_LAYOUT\") in [\"rows\", \"columns\"] assert int(lib.get_default(\"API_CORES\")) >= 1", "inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1))) npt.assert_allclose(actual=region, desired=[x.min(), x.max(),", "2, size * 3, 1, dtype=dtype) with clib.Session() as lib:", "output = outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\" for i, j, k", "C API. 
\"\"\" import os from contextlib import contextmanager import", "i in (data.x, data.y, data.z) ] ) expected = \"<vector", "lib.info # Mock GMT_Get_Default to return always the same string", "test_get_default(): \"\"\" Make sure get_default works without crashing and gives", "x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_x_and_y_increment(): \"\"\" Check that dataarray_to_matrix", "x = np.linspace(start=4, stop=0, num=3) y = np.linspace(start=5, stop=9, num=3)", "from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from", "lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\" for", "-20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): \"\"\" Check that", "# Should fail if trying to create a session before", "= np.arange(size * 2, size * 3, 1, dtype=dtype) with", "dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as", "assert ses[\"GMT_PAD_DEFAULT\"] != -99999 assert ses[\"GMT_DOUBLE\"] != -99999 with pytest.raises(GMTCLibError):", "assert session1.session_pointer is not None session2 = clib.Session() session2.create(name=\"test_session2\") assert", "If the exception is raised, the code won't get to", "of string or object dtype into virtual file dataset. \"\"\"", "npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\"", "# The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with", "\"opqrst\"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings)", "expected = \"\".join( f\"{h}\\t{i}\\t{j} {k}\\n\" for h, i, j, k", "pylint: disable=unused-argument \"\"\" A mock GMT API function that always", "geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as", "Fail to destroy session when given bad input. \"\"\" ses", "clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert", "N = {}\\t{}\\n\".format(rows, bounds) assert output == expected def test_virtualfile_from_vectors_pandas():", "assert ses[\"GMT_DOUBLE\"] != -99999 with pytest.raises(GMTCLibError): ses[\"A_WHOLE_LOT_OF_JUNK\"] # pylint: disable=pointless-statement", "invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print(\"This", "mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\",", "GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\") with clib.Session() as _lib: gmt_version", "destroying the old one. 
ses.create(\"test1\") with pytest.raises(GMTCLibError): ses.create(\"test2\") def test_destroy_session_fails():", "ses.create(\"test2\") def test_destroy_session_fails(): \"\"\" Fail to destroy session when given", "npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1]", "pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create(\"session1\") assert ses.session_pointer is not", "that opening and closing virtual files raises an exception for", "Version(\"6.2.0\") def test_get_default_fails(): \"\"\" Make sure get_default raises an exception", "= \"|\".join([family, via]) expected = lib[family] + lib[via] parsed =", "dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1", "numpy as np import numpy.testing as npt import pandas as", "size * 3, 1, dtype=dtype), ) ) with clib.Session() as", "that dataarray_to_matrix returns correct output with flipped x/y. \"\"\" data", "with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_get_default(): \"\"\" Make sure get_default works", "GMTTempFile() as out_fname: lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name)", "== expected @pytest.mark.parametrize(\"dtype\", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): \"\"\" Test passing", "with pytest.raises(GMTCLibError): ses.create(\"test2\") def test_destroy_session_fails(): \"\"\" Fail to destroy session", "\"\") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): \"\"\"", "returns correct output with flipped x. \"\"\" data = np.diag(v=np.arange(3))", "contextlib import contextmanager import numpy as np import numpy.testing as", "- x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that", "[str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): \"\"\" Test passing in one column", "\"\"\" Return an old version. \"\"\" if name == b\"API_VERSION\":", "\"$\"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1,", "dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\",", "C API function to make it always return a given", "np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T)", "def test_virtualfile_from_vectors_diff_size(): \"\"\" Test the function fails for arrays of", "vectors to virtual file dataset. \"\"\" dtypes = \"float32 float64", "family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], )", "expected = \"{}\\n\".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size(): \"\"\"", "lib.open_virtual_file(*vfargs): pass print(\"Shouldn't get to this code either\") def test_virtual_file_bad_direction():", "-99999 assert ses[\"GMT_PAD_DEFAULT\"] != -99999 assert ses[\"GMT_DOUBLE\"] != -99999 with", "works without crashing and gives reasonable results. 
\"\"\" with clib.Session()", "k in zip(x, y, strings1, strings2) ) assert output ==", "y, strings1, strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose():", "def test_dataarray_to_matrix_dims_fails(): \"\"\" Check that it fails for > 2", "\"\"\" if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument", "-20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry", "of different sizes. \"\"\" x = np.arange(5) y = np.arange(6)", "returns correct output. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=0,", "some API functions to fail without inducing a Segmentation Fault", "VIAS) for family, via in test_cases: composite = \"|\".join([family, via])", "mock_func=mock_defaults): with pytest.raises(GMTVersionError): with lib: assert lib.info[\"version\"] != \"5.4.3\" #", "from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from", "lib: with pytest.raises(GMTCLibError): lib.call_module(\"meh\", \"\") def test_call_module_error_message(): \"\"\" Check is", "with pytest.raises(GMTCLibError): lib.write_data( \"GMT_IS_VECTOR\", \"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1] * 6, \"some-file-name\",", "the dataset to a virtual file and pass it along", "3, 4) grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) with", "array to virtual file dataset. \"\"\" dtypes = \"float32 float64", "to exercise this part of the code. with clib.Session() as", "0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): \"\"\" Create", "a session twice ses = clib.Session() for __ in range(2):", "valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\",", "\"def\", \"ghij\", \"klmno\"], dtype=dtype) strings2 = np.array([\"pqrst\", \"uvwx\", \"yz!\", \"@#\",", "dict is working. \"\"\" # Check if there are no", "assert output == expected def test_virtualfile_from_vectors_transpose(): \"\"\" Test transforming matrix", "\"{} -\".format(fig1._name)) with clib.Session() as lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1,", "parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite():", "assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy", "\"\"\" Check that create_data raises exceptions for invalid input and", "as lib, mock(lib, \"GMT_Open_VirtualFile\", returns=0), mock( lib, \"GMT_Close_VirtualFile\", returns=1 ):", "lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with GMTTempFile() as", "y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_x_and_y_increment():", "pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create(\"test-session\") with mock(ses, \"GMT_Destroy_Session\", returns=1): with pytest.raises(GMTCLibError): ses.destroy()", "session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): \"\"\" Return our mock function.", "pytest import xarray as xr from packaging.version import Version from", "with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:", "the first figure and extract the region from it #", "\"\"\" Fails for invalid module arguments. 
\"\"\" with clib.Session() as", "with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if", "10, -20, -10]) fig1.coast(region=region1, projection=\"M6i\", frame=True, land=\"black\") fig2 = Figure()", "causing a Segmentation # Fault. Can't test this if by", "ses.session_pointer # pylint: disable=pointless-statement ses.create(\"session1\") assert ses.session_pointer is not None", "geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], # columns, rows, layers,", "with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile() as", "None session2 = clib.Session() session2.create(name=\"test_session2\") assert session2.session_pointer is not None", "with clib.Session() as lib: vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\",", "one. ses.create(\"test1\") with pytest.raises(GMTCLibError): ses.create(\"test2\") def test_destroy_session_fails(): \"\"\" Fail to", "returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as", "the C API function fail without causing a Segmentation #", "-\".format(fig1._name)) with clib.Session() as lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1)", "data = np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4, num=3) y =", "= clib.Session() with mock(lib, \"GMT_Get_Default\", mock_func=mock_defaults): with pytest.raises(GMTVersionError): with lib:", "dim lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], )", "Used to test that exceptions are raised when API functions", "not in a session. \"\"\" # Create an instance of", "closing the virtual file # Mock the opening to return", "by giving a bad file name because if # output=='',", "in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also", "an old version def mock_defaults(api, name, value): # pylint: disable=unused-argument", "ses[\"GMT_SESSION_EXTERNAL\"] != -99999 assert ses[\"GMT_MODULE_CMD\"] != -99999 assert ses[\"GMT_PAD_DEFAULT\"] !=", "data = full_data[:rows, :cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data)", "in (x, y, z)] ) expected = \"<vector memory>: N", "session twice ses = clib.Session() for __ in range(2): with", "as lib: with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def test_info_dict(): \"\"\" Make sure", "disable=unused-argument \"\"\" Put 'bla' in the value buffer. \"\"\" value.value", "land=\"black\") fig2 = Figure() fig2.basemap(region=\"US.HI+r5\", projection=\"M6i\", frame=True) # Activate the", "lib: vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\", # The invalid", "session. \"\"\" # Create an instance of Session without \"with\"", "pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"Not_a_valid_mode\", dim=[0,", "= (5, 3) for dtype in dtypes: with clib.Session() as", "lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile()", "in data via a virtual file with a Dataset. 
\"\"\"", "lib: lib.call_module(\"figure\", \"{} -\".format(fig1._name)) with clib.Session() as lib: wesn1 =", "x = np.linspace(0, 1, 5) y = np.logspace(2, 3, 4)", "test_virtualfile_from_matrix_slice(): \"\"\" Test transforming a slice of a larger array", "part of the code. with clib.Session() as lib: with mock(lib,", "_lib: gmt_version = Version(_lib.info[\"version\"]) @contextmanager def mock(session, func, returns=None, mock_func=None):", "vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} -C ->{}\".format(vfile, outfile.name))", "expected = \"<matrix memory>: N = {}\\t{}\\n\".format(rows, bounds) assert output", "the code won't get to the closing of the #", "inducing a Segmentation Fault (which is a good thing because", "assert output == expected def test_virtualfile_from_vectors_diff_size(): \"\"\" Test the function", "dim. \"\"\" with clib.Session() as lib: # Grids from matrices", "\"\"\" Figure() with pytest.raises(GMTCLibError): with clib.Session() as lib: lib.extract_region() def", "= np.array([\"pqrst\", \"uvwx\", \"yz!\", \"@#\", \"$\"], dtype=dtype) with clib.Session() as", "\"\"\" Check that dataarray_to_matrix returns correct output with flipped x/y.", "desired=np.flipud(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[x[1] - x[0],", "\"GMT_IS_GRID\", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput):", "N = {}\\t{}\\n\".format(size, bounds) assert output == expected def test_virtualfile_from_vectors_arraylike():", "lib = clib.Session() test_cases = ((family, via) for family in", "to gmt info vfargs = (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset) with", "* shape[1], dtype=dtype).reshape(shape) rows = 5 cols = 3 data", "GMT C API function to make it always return a", "desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_x_and_y_increment(): \"\"\" Check", "any memory problems. with clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig1._name))", "data_matrix def test_create_data_grid_dim(): \"\"\" Create a grid ignoring range and", "1, 0], # columns, rows, layers, dtype ) # Dataset", "same memory session1 = clib.Session() session1.create(name=\"test_session1\") assert session1.session_pointer is not", "= \"<vector memory>: N = {}\\t{}\\n\".format(size, bounds) assert output ==", "to test that exceptions are raised when API functions fail", "np.array([\"a\", \"bc\", \"def\", \"ghij\", \"klmno\"], dtype=dtype) strings2 = np.array([\"pqrst\", \"uvwx\",", "* 2, size * 3, 1) with clib.Session() as lib:", "memory>: N = {}\\t{}\\n\".format(shape[0], bounds) assert output == expected def", "dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data)", "range and inc. \"\"\" with clib.Session() as lib: # Grids", "usually only fails with errors). \"\"\" if mock_func is None:", "a GMT C API function to make it always return", "lib, \"GMT_Close_VirtualFile\", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print(\"Shouldn't", "along to gmt info vfargs = (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset)", "second one with clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig2._name)) with", "shape = (7, 5) for dtype in dtypes: data =", "def test_call_module_error_message(): \"\"\" Check is the GMT error message was", "if nothing has been plotted. 
\"\"\" Figure() with pytest.raises(GMTCLibError): with", "pytest.raises(GMTCLibError): with clib.Session() as lib: lib.extract_region() def test_extract_region_two_figures(): \"\"\" Extract", "mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails():", "from it # Use in a different session to avoid", "lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given", "npt.assert_allclose(actual=matrix, desired=data) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] -", "assert output == expected @pytest.mark.parametrize(\"dtype\", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): \"\"\"", "given bad input. \"\"\" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy()", "\"\"\" Test transforming matrix columns to virtual file dataset. \"\"\"", "with string or object dtype into virtual file dataset. \"\"\"", "== expected def test_virtualfile_from_vectors_pandas(): \"\"\" Pass vectors to a dataset", "strings) as vfile: with GMTTempFile() as outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\")", "* shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as", "\"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\", \"GMT_IS_DATASET|NOT_A_PROPER_VIA\", \"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX\", \"NOT_A_PROPER_FAMILY|ALSO_INVALID\", ] for test_case in test_cases: with", "return get_libgmt_func(name, argtypes, restype) setattr(session, \"get_libgmt_func\", mock_get_libgmt_func) yield setattr(session, \"get_libgmt_func\",", "except GMTCLibError as error: assert \"Module 'info' failed with status", "lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"Not_a_valid_mode\", dim=[0, 0, 1, 0], ranges=[150.0, 250.0,", "x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that dataarray_to_matrix", "output == expected def test_virtualfile_from_matrix_slice(): \"\"\" Test transforming a slice", "vectors data_vector = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_VECTOR\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1,", "that always returns a given value. \"\"\" return returns mock_func", "(NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib,", "\"\"\" Check that dataarray_to_matrix returns correct output. \"\"\" data =", "def test_dataarray_to_matrix_negative_x_increment(): \"\"\" Check if dataarray_to_matrix returns correct output with", "status check when closing the virtual file # Mock the", "via in VIAS) for family, via in test_cases: composite =", "\"\"\" Run a command to see if call_module works. \"\"\"", "def mock_get_libgmt_func(name, argtypes=None, restype=None): \"\"\" Return our mock function. 
\"\"\"", "\"some-file-name\", None, ) def test_dataarray_to_matrix_works(): \"\"\" Check that dataarray_to_matrix returns", "== expected def test_virtualfile_from_vectors_diff_size(): \"\"\" Test the function fails for", "name because if # output=='', GMT will just write to", "geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], ) assert data_vector !=", "np.diag(v=np.arange(3)) x = np.linspace(start=4, stop=0, num=3) y = np.linspace(start=5, stop=9,", "size * 3, 1) with clib.Session() as lib: with lib.virtualfile_from_vectors(x,", "np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4, num=3) y = np.linspace(start=5, stop=9,", "lib.call_module(\"info\", \"bogus-data.bla\") except GMTCLibError as error: assert \"Module 'info' failed", "function to make sure it doesn't fail badly. \"\"\" with", "def test_info_dict(): \"\"\" Make sure the clib.Session.info dict is working.", "Test the function fails for arrays of different sizes. \"\"\"", "the current figure, not the last figure. fig1 = Figure()", "\"\"\" vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IN|GMT_IS_REFERENCE\", None, ) #", "\"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module(\"meh\", \"\") def", "a bad file name because if # output=='', GMT will", "to return 0 (success) so that we don't open a", "invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_DATASET\",", "lib, mock(lib, \"GMT_Open_VirtualFile\", returns=0), mock( lib, \"GMT_Close_VirtualFile\", returns=1 ): with", "# Mock the opening to return 0 (success) so that", "with pytest.raises(GMTCLibError): lib.call_module(\"meh\", \"\") def test_call_module_error_message(): \"\"\" Check is the", "that extract region fails if nothing has been plotted. \"\"\"", "->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output ==", ") expected = \"<matrix memory>: N = {}\\t{}\\n\".format(shape[0], bounds) assert", "as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile()", "ses = clib.Session() with mock(ses, \"GMT_Create_Session\", returns=None): with pytest.raises(GMTCLibError): ses.create(\"test-session-name\")", "(success) so that we don't open a file that #", "an invalid direction argument. \"\"\" with clib.Session() as lib: vfargs", "def test_call_module_invalid_arguments(): \"\"\" Fails for invalid module arguments. \"\"\" with", "mock GMT API function that always returns a given value.", "fail if not given valid modifiers but is using them", "\"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IN|GMT_IS_REFERENCE\", None, ) # Mock Open_VirtualFile to test", "test_method_no_session(): \"\"\" Fails when not in a session. \"\"\" #", "dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size,", "y = tuple(range(size, size * 2, 1)) z = range(size", "an instance of Session without \"with\" so no session is", ") def test_virtual_file(): \"\"\" Test passing in data via a", "import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput,", "in [\"rows\", \"columns\"] assert int(lib.get_default(\"API_CORES\")) >= 1 assert Version(lib.get_default(\"API_VERSION\")) >=", "x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2,", "hard to make the C API function fail without causing", "single family argument correctly. 
\"\"\" lib = clib.Session() for family", "sure the clib.Session.info dict is working. \"\"\" # Check if", "session and make sure they are not pointing to the", "if name == b\"API_VERSION\": value.value = b\"5.4.3\" else: value.value =", "def test_dataarray_to_matrix_works(): \"\"\" Check that dataarray_to_matrix returns correct output. \"\"\"", "assert ses[\"GMT_MODULE_CMD\"] != -99999 assert ses[\"GMT_PAD_DEFAULT\"] != -99999 assert ses[\"GMT_DOUBLE\"]", "test_create_data_dataset(): \"\"\" Run the function to make sure it doesn't", "assert data_vector != data_matrix def test_create_data_grid_dim(): \"\"\" Create a grid", "test_extract_region_fails(): \"\"\" Check that extract region fails if nothing has", "> 2 dims. \"\"\" # Make a 3D regular grid", "np.arange(10) grid = xr.DataArray(data, coords=[(\"z\", z), (\"y\", y), (\"x\", x)])", "assert ses.info for key in ses.info: assert ses.info[key] == \"bla\"", "\"float32 float64 int32 int64 uint32 uint64\".split() shape = (5, 3)", "geometry=\"Not_a_valid_geometry\", mode=\"GMT_CONTAINER_ONLY\", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0],", "dtype into virtual file dataset. \"\"\" size = 5 x", "extract_region to make sure that it's # getting from the", "fig1 = Figure() region1 = np.array([0, 10, -20, -10]) fig1.coast(region=region1,", "errors. \"\"\" # Create two session and make sure they", "make it always return a given value. Used to test", "from matrices data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20,", "outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [ \"<{:.0f}/{:.0f}>\".format(i.min(), i.max())", "* 6, \"some-file-name\", None, ) def test_dataarray_to_matrix_works(): \"\"\" Check that", "as lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try", "clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with", "= {}\\t{}\\n\".format(size, bounds) assert output == expected @pytest.mark.parametrize(\"dtype\", [str, object])", "def test_create_data_grid_range(): \"\"\" Create a grid specifying range and inc", "test_get_default_fails(): \"\"\" Make sure get_default raises an exception for invalid", "dataset. \"\"\" size = 5 x = np.arange(size, dtype=np.int32) y", "always return a given value. Used to test that exceptions", "clib.Session() test_cases = ((family, via) for family in FAMILIES for", "None, ) # Mock Open_VirtualFile to test the status check", "in data.T] ) expected = \"<matrix memory>: N = {}\\t{}\\n\".format(shape[0],", "test_virtual_file_bad_direction(): \"\"\" Test passing an invalid direction argument. \"\"\" with", "\"11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338\" def test_call_module_invalid_arguments(): \"\"\" Fails", "failed\") def test_virtualfile_from_vectors(): \"\"\" Test the automation for transforming vectors", "with GMTTempFile() as out_fname: lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname, out_fname.name)) assert", "captured. \"\"\" with clib.Session() as lib: try: lib.call_module(\"info\", \"bogus-data.bla\") except", "matrices data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1,", "npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_dims_fails(): \"\"\"", "have failed\") def test_virtualfile_from_vectors(): \"\"\" Test the automation for transforming", "# names. 
Use a mock instead just to exercise this", "y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_get_default(): \"\"\" Make", "clib.Session() assert ses[\"GMT_SESSION_EXTERNAL\"] != -99999 assert ses[\"GMT_MODULE_CMD\"] != -99999 assert", "correct output with flipped x. \"\"\" data = np.diag(v=np.arange(3)) x", "\"bogus-data.bla\") except GMTCLibError as error: assert \"Module 'info' failed with", "the session is closed when the exception is raised. with", "grid = xr.DataArray(data, coords=[(\"z\", z), (\"y\", y), (\"x\", x)]) with", "def test_extract_region_fails(): \"\"\" Check that extract region fails if nothing", "transforming vectors to virtual file dataset. \"\"\" dtypes = \"float32", "our mock function. \"\"\" if name == func: return mock_func", "!= -99999 assert ses[\"GMT_PAD_DEFAULT\"] != -99999 assert ses[\"GMT_DOUBLE\"] != -99999", "pandas Series. \"\"\" dtypes = \"float32 float64 int32 int64 uint32", "Create a grid specifying range and inc instead of dim.", "bounds) assert output == expected def test_virtualfile_from_matrix_slice(): \"\"\" Test transforming", "dims. \"\"\" # Make a 3D regular grid data =", "ses.destroy() def test_call_module(): \"\"\" Run a command to see if", "with the second one with clib.Session() as lib: lib.call_module(\"figure\", \"{}", "valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant(", "\"get_libgmt_func\", get_libgmt_func) def test_getitem(): \"\"\" Test that I can get", "in str(error) def test_method_no_session(): \"\"\" Fails when not in a", "it doesn't fail badly. \"\"\" with clib.Session() as lib: #", "data.T] ) expected = \"<matrix memory>: N = {}\\t{}\\n\".format(rows, bounds)", "message was captured. \"\"\" with clib.Session() as lib: try: lib.call_module(\"info\",", "vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile, outfile.name)) output", "region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=data) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(),", "to test the status check when entering the context. #", "y, z)] ) expected = \"<vector memory>: N = {}\\t{}\\n\".format(size,", "lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_VECTOR\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], # columns,", "and destroy session are called without errors. \"\"\" # Create", "def test_parse_constant_single(): \"\"\" Parsing a single family argument correctly. \"\"\"", "output with flipped x. \"\"\" data = np.diag(v=np.arange(3)) x =", "family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\" geometry = \"GMT_IS_POINT\" dataset = lib.create_data( family=family,", "or segfaults from getting all of the # properties. 
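# The tests below automate the same round trip through the higher-level
# virtualfile_from_* wrappers. A minimal user-level sketch of the pattern:
# numpy vectors go in as a virtual file that modules accept like a file path.
def _example_virtualfile_from_vectors():
    x = np.array([1.0, 2.0, 3.0])
    y = np.array([4.0, 5.0, 6.0])
    with clib.Session() as lib:
        with lib.virtualfile_from_vectors(x, y) as vfile:
            with GMTTempFile() as tmp:
                lib.call_module("info", "{} ->{}".format(vfile, tmp.name))
                print(tmp.read())  # bounds of x and y as reported by gmt info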
def test_virtualfile_from_vectors():
    """
    Test the automation for transforming vectors to virtual file dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    size = 10
    for dtype in dtypes:
        x = np.arange(size, dtype=dtype)
        y = np.arange(size, size * 2, 1, dtype=dtype)
        z = np.arange(size * 2, size * 3, 1, dtype=dtype)
        with clib.Session() as lib:
            with lib.virtualfile_from_vectors(x, y, z) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
            )
            expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
            assert output == expected


@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
    """
    Test passing in one column with string or object dtype into virtual file
    dataset.
    """
    size = 5
    x = np.arange(size, dtype=np.int32)
    y = np.arange(size, size * 2, 1, dtype=np.int32)
    strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
    with clib.Session() as lib:
        with lib.virtualfile_from_vectors(x, y, strings) as vfile:
            with GMTTempFile() as outfile:
                lib.call_module("convert", f"{vfile} ->{outfile.name}")
                output = outfile.read(keep_tabs=True)
    expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
    assert output == expected


@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
    """
    Test passing in two columns of string or object dtype into virtual file
    dataset.
    """
    size = 5
    x = np.arange(size, dtype=np.int32)
    y = np.arange(size, size * 2, 1, dtype=np.int32)
    strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
    strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
    with clib.Session() as lib:
        with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
            with GMTTempFile() as outfile:
                lib.call_module("convert", f"{vfile} ->{outfile.name}")
                output = outfile.read(keep_tabs=True)
    expected = "".join(
        f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
    )
    assert output == expected


def test_virtualfile_from_vectors_transpose():
    """
    Test transforming matrix columns to virtual file dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    shape = (7, 5)
    for dtype in dtypes:
        data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
        with clib.Session() as lib:
            with lib.virtualfile_from_vectors(*data.T) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
            )
            expected = "{}\n".format(bounds)
            assert output == expected
\"\"\" data =", "Check if there are no errors or segfaults from getting", "with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile, outfile.name)) output =", "given value. \"\"\" return returns mock_func = mock_api_function get_libgmt_func =", "clib.Session() as lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0,", "returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print(\"Should not get to this", "when given bad input. \"\"\" lib = clib.Session() test_cases =", "grid specifying range and inc instead of dim. \"\"\" with", "!= -99999 assert ses[\"GMT_MODULE_CMD\"] != -99999 assert ses[\"GMT_PAD_DEFAULT\"] != -99999", "the # virtual file. with clib.Session() as lib, mock(lib, \"GMT_Open_VirtualFile\",", "function fail without causing a Segmentation # Fault. Can't test", "Test the status check when closing the virtual file #", "->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i))", "Test passing in two columns of string or object dtype", "matrices using range and int lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0,", ") assert output == expected def test_virtualfile_from_vectors_transpose(): \"\"\" Test transforming", "= clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create(\"test-session\") with mock(ses, \"GMT_Destroy_Session\", returns=1):", "output with flipped y. \"\"\" data = np.diag(v=np.arange(3)) x =", "y, strings1, strings2) as vfile: with GMTTempFile() as outfile: lib.call_module(\"convert\",", "def test_virtualfile_from_vectors_transpose(): \"\"\" Test transforming matrix columns to virtual file", "np.array([-165.0, -150.0, 15.0, 25.0])) def test_write_data_fails(): \"\"\" Check that write", "3) for dtype in dtypes: with clib.Session() as lib: family", "test_virtual_file_fails(): \"\"\" Check that opening and closing virtual files raises", "dataset to a virtual file and pass it along to", "as lib: assert lib.info # Mock GMT_Get_Default to return always", "bounds) assert output == expected def test_virtualfile_from_vectors_arraylike(): \"\"\" Pass array-like", "region1) # Now try it with the second one with", "too old. \"\"\" # Mock GMT_Get_Default to return an old", "def test_virtualfile_from_vectors_arraylike(): \"\"\" Pass array-like vectors to a dataset. \"\"\"", "\"GMT_Destroy_Session\", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): \"\"\" Run", "because libgmt usually only fails with errors). \"\"\" if mock_func", "N = {}\\t{}\\n\".format(size, bounds) assert output == expected @pytest.mark.parametrize(\"dtype\", [str,", "called without errors. \"\"\" # Create two session and make", "dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=data) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1]", "output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(), col.max()) for col", "always returns a given value. 
\"\"\" return returns mock_func =", "a Segmentation Fault (which is a good thing because libgmt", "pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): \"\"\" Run a command to", "via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES,", "This should work... lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=VIAS ) # But", "zip(x, y, strings)) assert output == expected @pytest.mark.parametrize(\"dtype\", [str, object])", "doesn't fail badly. \"\"\" with clib.Session() as lib: # Dataset", "\"\"\" lib = clib.Session() for family in FAMILIES: parsed =", "test_create_destroy_session(): \"\"\" Test that create and destroy session are called", "instead of dim. \"\"\" with clib.Session() as lib: # Grids", "Check that extract region fails if nothing has been plotted.", "not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create", "bogus-data.bla\" in str(error) def test_method_no_session(): \"\"\" Fails when not in", "# we won't close later. with clib.Session() as lib, mock(lib,", "y = np.linspace(start=9, stop=5, num=3) grid = xr.DataArray(data, coords=[(\"y\", y),", "entering the context. # If the exception is raised, the", "it's not easy to get some API functions to fail", "test the status check when entering the context. # If", "dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual", "1, 0], # columns, rows, layers, dtype ) data =", "= \"GMT_IS_POINT\" dataset = lib.create_data( family=family, geometry=geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[shape[1], shape[0],", "a Segmentation # Fault. Can't test this if by giving", "session1.create(name=\"test_session1\") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name=\"test_session2\")", "output with flipped x/y. \"\"\" data = np.diag(v=np.arange(3)) x =", "int32 int64 uint32 uint64\".split() size = 10 for dtype in", "get some API functions to fail without inducing a Segmentation", "y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def", "bounds = \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for col in data.T] )", "= \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(), col.max()) for col in data.T] ) expected", "pass print(\"Shouldn't get to this code either\") def test_virtual_file_bad_direction(): \"\"\"", "a session before destroying the old one. ses.create(\"test1\") with pytest.raises(GMTCLibError):", "outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for", "segfaults from getting all of the # properties. with clib.Session()", "z = np.arange(size * 2, size * 3, 1, dtype=dtype)", "clib.Session() as lib: vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\", #", "work... lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't.", "as lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile:", "import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\") with clib.Session() as _lib:", "correct output with flipped x/y. \"\"\" data = np.diag(v=np.arange(3)) x", "in the value buffer. 
\"\"\" value.value = b\"bla\" return 0", "clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig2._name)) with clib.Session() as lib:", "using them anyway. # This should work... lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES,", "ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint:", "inc=[0.1, 0.2], ) # If the data pointer returned is", "column with string or object dtype into virtual file dataset.", "# columns, rows, layers, dtype ) # Dataset from matrices", "import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import", "file that # we won't close later. with clib.Session() as", "uint32 uint64\".split() size = 10 for dtype in dtypes: x", "\"\"\" Check if dataarray_to_matrix returns correct output with flipped x.", "of Session without \"with\" so no session is created. lib", "y = np.logspace(2, 3, 4) grid = xr.DataArray(data, coords=[(\"y\", y),", "dtype=dtype), ) ) with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y,", "1, dtype=np.int32) strings1 = np.array([\"a\", \"bc\", \"def\", \"ghij\", \"klmno\"], dtype=dtype)", "* 3, 1, dtype=dtype), ) ) with clib.Session() as lib:", "valid file # names. Use a mock instead just to", "coords=[(\"z\", z), (\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def", "N = {}\\t{}\\n\".format(size, bounds) assert output == expected def test_extract_region_fails():", "it fails for > 2 dims. \"\"\" # Make a", "test_virtual_file(): \"\"\" Test passing in data via a virtual file", "{}\\t{}\\n\".format(rows, bounds) assert output == expected def test_virtualfile_from_vectors_pandas(): \"\"\" Pass", "as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile()", "old version def mock_defaults(api, name, value): # pylint: disable=unused-argument \"\"\"", "[ \"SOME_random_STRING\", \"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\", \"GMT_IS_DATASET|NOT_A_PROPER_VIA\", \"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX\", \"NOT_A_PROPER_FAMILY|ALSO_INVALID\", ] for test_case in", "= 13 for dtype in dtypes: data = pd.DataFrame( data=dict(", "coords=[(\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_get_default(): \"\"\"", "can get correct constants from the C lib. \"\"\" ses", ") assert data_vector != data_matrix def test_create_data_grid_dim(): \"\"\" Create a", "good thing because libgmt usually only fails with errors). \"\"\"", "x=np.arange(size, dtype=dtype), y=np.arange(size, size * 2, 1, dtype=dtype), z=np.arange(size *", "import numpy as np import numpy.testing as npt import pandas", "np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype)", "dictionary assert ses.info for key in ses.info: assert ses.info[key] ==", "the last figure. fig1 = Figure() region1 = np.array([0, 10,", "not the last figure. fig1 = Figure() region1 = np.array([0,", "(\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flipud(data)) npt.assert_allclose(actual=region,", "with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): \"\"\" Run a command", "the virtual file # Mock the opening to return 0", "# This should work... 
lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=VIAS ) #", "== \"11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338\" def test_call_module_invalid_arguments(): \"\"\"", "reasonable results. \"\"\" with clib.Session() as lib: assert lib.get_default(\"API_GRID_LAYOUT\") in", "def test_create_data_fails(): \"\"\" Check that create_data raises exceptions for invalid", "returns=0), mock( lib, \"GMT_Close_VirtualFile\", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs):", "dataarray_to_matrix returns correct output with flipped y. \"\"\" data =", "k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize(\"dtype\",", "= 5 cols = 3 data = full_data[:rows, :cols] with", "= \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for i in (x, y, z)]", "= Figure() region1 = np.array([0, 10, -20, -10]) fig1.coast(region=region1, projection=\"M6i\",", "pylint: disable=pointless-statement def test_create_destroy_session(): \"\"\" Test that create and destroy", "\"\"\" Return our mock function. \"\"\" if name == func:", "bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for i in (x, y,", "lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as", "ignoring range and inc. \"\"\" with clib.Session() as lib: #", "calling extract_region to make sure that it's # getting from", "fig1.coast(region=region1, projection=\"M6i\", frame=True, land=\"black\") fig2 = Figure() fig2.basemap(region=\"US.HI+r5\", projection=\"M6i\", frame=True)", "just write to stdout and spaces are valid file #", "with GMTTempFile() as outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True)", "dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z", "= {}\\t{}\\n\".format(size, bounds) assert output == expected def test_virtualfile_from_vectors_arraylike(): \"\"\"", "pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): \"\"\" Parsing a", "as lib: lib.call_module(\"figure\", \"{} -\".format(fig2._name)) with clib.Session() as lib: wesn2", "np.arange(6) with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y):", "col.max()) for col in data.T] ) expected = \"<matrix memory>:", "y, z) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{}", "Test transforming a slice of a larger array to virtual", "= np.diag(v=np.arange(3)) x = np.linspace(start=4, stop=0, num=3) y = np.linspace(start=5,", "as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile, outfile.name))", "mock_get_libgmt_func(name, argtypes=None, restype=None): \"\"\" Return our mock function. \"\"\" if", "create a session before destroying the old one. ses.create(\"test1\") with", "0 lib = clib.Session() with mock(lib, \"GMT_Get_Default\", mock_func=mock_defaults): with pytest.raises(GMTVersionError):", "and gives reasonable results. 
\"\"\" with clib.Session() as lib: assert", "= clib.Session() with mock(ses, \"GMT_Create_Session\", returns=None): with pytest.raises(GMTCLibError): ses.create(\"test-session-name\") #", "# Grids from matrices using dim lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\",", "def test_get_default_fails(): \"\"\" Make sure get_default raises an exception for", "from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError,", "\"get_libgmt_func\", mock_get_libgmt_func) yield setattr(session, \"get_libgmt_func\", get_libgmt_func) def test_getitem(): \"\"\" Test", "invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_GRID\",", "lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\",", "with clib.Session() as lib: with mock(lib, \"GMT_Write_Data\", returns=1): with pytest.raises(GMTCLibError):", "Check that dataarray_to_matrix returns correct output with flipped x/y. \"\"\"", "test this if by giving a bad file name because", "b\"API_VERSION\": value.value = b\"5.4.3\" else: value.value = b\"bla\" return 0", "assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == \"11.5309 61.7074", "Figure() with pytest.raises(GMTCLibError): with clib.Session() as lib: lib.extract_region() def test_extract_region_two_figures():", "pytest.raises(GMTCLibError): lib.call_module(\"meh\", \"\") def test_call_module_error_message(): \"\"\" Check is the GMT", "in VIAS) for family, via in test_cases: composite = \"|\".join([family,", "region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.fliplr(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(),", "family=\"GMT_IS_GRID\", geometry=\"Not_a_valid_geometry\", mode=\"GMT_CONTAINER_ONLY\", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0,", "pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not", "== expected def test_extract_region_fails(): \"\"\" Check that extract region fails", "get_default raises an exception for invalid names. \"\"\" with clib.Session()", "@contextmanager def mock(session, func, returns=None, mock_func=None): \"\"\" Mock a GMT", "region fails if nothing has been plotted. \"\"\" Figure() with", "fails for > 2 dims. \"\"\" # Make a 3D", "y.max()]) npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]]) def test_dataarray_to_matrix_negative_x_increment():", "object dtype into virtual file dataset. \"\"\" size = 5", "correctly. \"\"\" lib = clib.Session() test_cases = ((family, via) for", "!= data_matrix def test_create_data_grid_dim(): \"\"\" Create a grid ignoring range", "\"\"\" Parsing a composite constant argument (separated by |) correctly.", "with lib.open_virtual_file(*vfargs): pass print(\"Shouldn't get to this code either\") def", "= xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) matrix, region, inc =", "inc. \"\"\" with clib.Session() as lib: # Grids from matrices", "fails for arrays of different sizes. \"\"\" x = np.arange(5)", "of the code. 
@contextmanager
def mock(session, func, returns=None, mock_func=None):
    """
    Mock a GMT C API function to make it always return a given value.

    Used to test that exceptions are raised when API functions fail by
    producing a NULL pointer as output or non-zero status codes.

    Needed because it's not easy to get some API functions to fail without
    inducing a Segmentation Fault (which is a good thing because libgmt
    usually only fails with errors).
    """
    if mock_func is None:

        def mock_api_function(*args):  # pylint: disable=unused-argument
            """
            A mock GMT API function that always returns a given value.
            """
            return returns

        mock_func = mock_api_function

    get_libgmt_func = session.get_libgmt_func

    def mock_get_libgmt_func(name, argtypes=None, restype=None):
        """
        Return our mock function.
        """
        if name == func:
            return mock_func
        return get_libgmt_func(name, argtypes, restype)

    setattr(session, "get_libgmt_func", mock_get_libgmt_func)
    yield
    setattr(session, "get_libgmt_func", get_libgmt_func)
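# A minimal usage sketch of ``mock`` (our illustration, not part of the test
# suite; the helper name ``_example_mock_usage`` is ours). It mirrors how
# test_create_session_fails below forces a GMT API call to fail:
def _example_mock_usage():
    """
    Force GMT_Create_Session to return NULL so the error path can be tested.
    """
    ses = clib.Session()
    # While the context is active, any lookup of GMT_Create_Session through
    # ses.get_libgmt_func returns a stub that always returns None, so
    # ses.create() must raise GMTCLibError.
    with mock(ses, "GMT_Create_Session", returns=None):
        with pytest.raises(GMTCLibError):
            ses.create("mocked-session")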
def test_getitem():
    """
    Test that I can get correct constants from the C lib.
    """
    ses = clib.Session()
    assert ses["GMT_SESSION_EXTERNAL"] != -99999
    assert ses["GMT_MODULE_CMD"] != -99999
    assert ses["GMT_PAD_DEFAULT"] != -99999
    assert ses["GMT_DOUBLE"] != -99999
    with pytest.raises(GMTCLibError):
        ses["A_WHOLE_LOT_OF_JUNK"]  # pylint: disable=pointless-statement


def test_create_destroy_session():
    """
    Test that create and destroy session are called without errors.
    """
    # Create two sessions and make sure they are not pointing to the same
    # memory.
    session1 = clib.Session()
    session1.create(name="test_session1")
    assert session1.session_pointer is not None
    session2 = clib.Session()
    session2.create(name="test_session2")
    assert session2.session_pointer is not None
    assert session2.session_pointer != session1.session_pointer
    session1.destroy()
    session2.destroy()
    # Create and destroy a session twice
    ses = clib.Session()
    for __ in range(2):
        with pytest.raises(GMTCLibNoSessionError):
            ses.session_pointer  # pylint: disable=pointless-statement
        ses.create("session1")
        assert ses.session_pointer is not None
        ses.destroy()
    with pytest.raises(GMTCLibNoSessionError):
        ses.session_pointer  # pylint: disable=pointless-statement


def test_create_session_fails():
    """
    Check that an exception is raised when failing to create a session.
    """
    ses = clib.Session()
    with mock(ses, "GMT_Create_Session", returns=None):
        with pytest.raises(GMTCLibError):
            ses.create("test-session-name")
    # Should fail if trying to create a session before destroying the old one.
    ses.create("test1")
    with pytest.raises(GMTCLibError):
        ses.create("test2")


def test_destroy_session_fails():
    """
    Fail to destroy session when given bad input.
    """
    ses = clib.Session()
    with pytest.raises(GMTCLibNoSessionError):
        ses.destroy()
    ses.create("test-session")
    with mock(ses, "GMT_Destroy_Session", returns=1):
        with pytest.raises(GMTCLibError):
            ses.destroy()
    ses.destroy()
def test_call_module():
    """
    Run a command to see if call_module works.
    """
    data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
    with clib.Session() as lib:
        with GMTTempFile() as out_fname:
            lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
            assert os.path.exists(out_fname.name)
            output = out_fname.read().strip()
            assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"


def test_call_module_invalid_arguments():
    """
    Fails for invalid module arguments.
    """
    with clib.Session() as lib:
        with pytest.raises(GMTCLibError):
            lib.call_module("info", "bogus-data.bla")


def test_call_module_invalid_name():
    """
    Fails when given bad input.
    """
    with clib.Session() as lib:
        with pytest.raises(GMTCLibError):
            lib.call_module("meh", "")


def test_call_module_error_message():
    """
    Check that the GMT error message was captured.
    """
    with clib.Session() as lib:
        try:
            lib.call_module("info", "bogus-data.bla")
        except GMTCLibError as error:
            assert "Module 'info' failed with status code" in str(error)
            assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)


def test_method_no_session():
    """
    Fails when not in a session.
    """
    # Create an instance of Session without "with" so no session is created.
    lib = clib.Session()
    with pytest.raises(GMTCLibNoSessionError):
        lib.call_module("gmtdefaults", "")
    with pytest.raises(GMTCLibNoSessionError):
        lib.session_pointer  # pylint: disable=pointless-statement
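# For context (our note, not from the original file): lib.call_module("info",
# args) is the in-process equivalent of running ``gmt info args`` on the
# command line (hence the "gmtinfo [ERROR]" text checked above), so the tests
# above can compare its output against known values for the bundled
# points.txt data file.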
def test_parse_constant_single():
    """
    Parsing a single family argument correctly.
    """
    lib = clib.Session()
    for family in FAMILIES:
        parsed = lib._parse_constant(family, valid=FAMILIES)
        assert parsed == lib[family]


def test_parse_constant_composite():
    """
    Parsing a composite constant argument (separated by |) correctly.
    """
    lib = clib.Session()
    test_cases = ((family, via) for family in FAMILIES for via in VIAS)
    for family, via in test_cases:
        composite = "|".join([family, via])
        expected = lib[family] + lib[via]
        parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS)
        assert parsed == expected


def test_parse_constant_fails():
    """
    Check if the function fails when given bad input.
    """
    lib = clib.Session()
    test_cases = [
        "SOME_random_STRING",
        "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR",
        "GMT_IS_DATASET|NOT_A_PROPER_VIA",
        "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX",
        "NOT_A_PROPER_FAMILY|ALSO_INVALID",
    ]
    for test_case in test_cases:
        with pytest.raises(GMTInvalidInput):
            lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)

    # Should also fail if not given valid modifiers but is using them anyway.
    # This should work...
    lib._parse_constant(
        "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS
    )
    # But this shouldn't.
    with pytest.raises(GMTInvalidInput):
        lib._parse_constant(
            "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None
        )


def test_create_data_dataset():
    """
    Run the function to make sure it doesn't fail badly.
    """
    with clib.Session() as lib:
        # Dataset from vectors
        data_vector = lib.create_data(
            family="GMT_IS_DATASET|GMT_VIA_VECTOR",
            geometry="GMT_IS_POINT",
            mode="GMT_CONTAINER_ONLY",
            dim=[10, 20, 1, 0],  # columns, rows, layers, dtype
        )
        # Dataset from matrices
        data_matrix = lib.create_data(
            family="GMT_IS_DATASET|GMT_VIA_MATRIX",
            geometry="GMT_IS_POINT",
            mode="GMT_CONTAINER_ONLY",
            dim=[10, 20, 1, 0],
        )
        assert data_vector != data_matrix


def test_create_data_grid_dim():
    """
    Create a grid ignoring range and inc.
    """
    with clib.Session() as lib:
        # Grids from matrices using dim
        lib.create_data(
            family="GMT_IS_GRID|GMT_VIA_MATRIX",
            geometry="GMT_IS_SURFACE",
            mode="GMT_CONTAINER_ONLY",
            dim=[10, 20, 1, 0],
        )


def test_create_data_grid_range():
    """
    Create a grid specifying range and inc instead of dim.
    """
    with clib.Session() as lib:
        # Grids from matrices using range and inc
        lib.create_data(
            family="GMT_IS_GRID|GMT_VIA_MATRIX",
            geometry="GMT_IS_SURFACE",
            mode="GMT_CONTAINER_ONLY",
            ranges=[150.0, 250.0, -20.0, 20.0],
            inc=[0.1, 0.2],
        )


def test_create_data_fails():
    """
    Check that create_data raises exceptions for invalid input and output.
    """
    # Passing in invalid mode
    with pytest.raises(GMTInvalidInput):
        with clib.Session() as lib:
            lib.create_data(
                family="GMT_IS_DATASET",
                geometry="GMT_IS_SURFACE",
                mode="Not_a_valid_mode",
                dim=[0, 0, 1, 0],
                ranges=[150.0, 250.0, -20.0, 20.0],
                inc=[0.1, 0.2],
            )
    # Passing in invalid geometry
    with pytest.raises(GMTInvalidInput):
        with clib.Session() as lib:
            lib.create_data(
                family="GMT_IS_GRID",
                geometry="Not_a_valid_geometry",
                mode="GMT_CONTAINER_ONLY",
                dim=[0, 0, 1, 0],
                ranges=[150.0, 250.0, -20.0, 20.0],
                inc=[0.1, 0.2],
            )
    # If the data pointer returned is None (NULL pointer)
    with pytest.raises(GMTCLibError):
        with clib.Session() as lib:
            with mock(lib, "GMT_Create_Data", returns=None):
                lib.create_data(
                    family="GMT_IS_DATASET",
                    geometry="GMT_IS_SURFACE",
                    mode="GMT_CONTAINER_ONLY",
                    dim=[11, 10, 2, 0],
                )
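# Quick reference (our summary, not from the original file): _parse_constant
# accepts "FAMILY" or "FAMILY|VIA", where FAMILY must be in ``valid`` and VIA
# in ``valid_modifiers``; the result is the sum of the matching integer flags
# (see test_parse_constant_composite above). Unknown names, more than one
# modifier, or any modifier when valid_modifiers=None raise GMTInvalidInput.
# Likewise, the ``dim`` argument to create_data above is always ordered as
# [columns, rows, layers, dtype].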
def test_virtual_file():
    """
    Test passing in data via a virtual file with a Dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    shape = (5, 3)
    for dtype in dtypes:
        with clib.Session() as lib:
            family = "GMT_IS_DATASET|GMT_VIA_MATRIX"
            geometry = "GMT_IS_POINT"
            dataset = lib.create_data(
                family=family,
                geometry=geometry,
                mode="GMT_CONTAINER_ONLY",
                dim=[shape[1], shape[0], 1, 0],  # columns, rows, layers, dtype
            )
            data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
            lib.put_matrix(dataset, matrix=data)
            # Add the dataset to a virtual file and pass it along to gmt info
            vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset)
            with lib.open_virtual_file(*vfargs) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
            )
            expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
            assert output == expected


def test_virtual_file_fails():
    """
    Check that opening and closing virtual files raises an exception for
    non-zero return codes.
    """
    vfargs = (
        "GMT_IS_DATASET|GMT_VIA_MATRIX",
        "GMT_IS_POINT",
        "GMT_IN|GMT_IS_REFERENCE",
        None,
    )

    # Mock Open_VirtualFile to test the status check when entering the context.
    # If the exception is raised, the code won't get to the closing of the
    # virtual file.
    with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
        with pytest.raises(GMTCLibError):
            with lib.open_virtual_file(*vfargs):
                print("Should not get to this code")

    # Test the status check when closing the virtual file.
    # Mock the opening to return 0 (success) so that we don't open a file that
    # we won't close later.
    with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
        lib, "GMT_Close_VirtualFile", returns=1
    ):
        with pytest.raises(GMTCLibError):
            with lib.open_virtual_file(*vfargs):
                pass
            print("Shouldn't get to this code either")


def test_virtual_file_bad_direction():
    """
    Test passing an invalid direction argument.
    """
    with clib.Session() as lib:
        vfargs = (
            "GMT_IS_DATASET|GMT_VIA_MATRIX",
            "GMT_IS_POINT",
            "GMT_IS_GRID",  # The invalid direction argument
            0,
        )
        with pytest.raises(GMTInvalidInput):
            with lib.open_virtual_file(*vfargs):
                print("This should have failed")
def test_virtualfile_from_vectors():
    """
    Test the automation for transforming vectors to virtual file dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    size = 10
    for dtype in dtypes:
        x = np.arange(size, dtype=dtype)
        y = np.arange(size, size * 2, 1, dtype=dtype)
        z = np.arange(size * 2, size * 3, 1, dtype=dtype)
        with clib.Session() as lib:
            with lib.virtualfile_from_vectors(x, y, z) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)]
            )
            expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
            assert output == expected


@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_one_string_or_object_column(dtype):
    """
    Test passing in one column with string or object dtype into virtual file
    dataset.
    """
    size = 5
    x = np.arange(size, dtype=np.int32)
    y = np.arange(size, size * 2, 1, dtype=np.int32)
    strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype)
    with clib.Session() as lib:
        with lib.virtualfile_from_vectors(x, y, strings) as vfile:
            with GMTTempFile() as outfile:
                lib.call_module("convert", f"{vfile} ->{outfile.name}")
                output = outfile.read(keep_tabs=True)
        expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings))
        assert output == expected


@pytest.mark.parametrize("dtype", [str, object])
def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):
    """
    Test passing in two columns of string or object dtype into virtual file
    dataset.
    """
    size = 5
    x = np.arange(size, dtype=np.int32)
    y = np.arange(size, size * 2, 1, dtype=np.int32)
    strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype)
    strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype)
    with clib.Session() as lib:
        with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:
            with GMTTempFile() as outfile:
                lib.call_module("convert", f"{vfile} ->{outfile.name}")
                output = outfile.read(keep_tabs=True)
        expected = "".join(
            f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2)
        )
        assert output == expected


def test_virtualfile_from_vectors_transpose():
    """
    Test transforming matrix columns to virtual file dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    shape = (7, 5)
    for dtype in dtypes:
        data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
        with clib.Session() as lib:
            with lib.virtualfile_from_vectors(*data.T) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T]
            )
            expected = "{}\n".format(bounds)
            assert output == expected


def test_virtualfile_from_vectors_diff_size():
    """
    Test the function fails for arrays of different sizes.
    """
    x = np.arange(5)
    y = np.arange(6)
    with clib.Session() as lib:
        with pytest.raises(GMTInvalidInput):
            with lib.virtualfile_from_vectors(x, y):
                print("This should have failed")
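# Shared pattern in the virtualfile tests (our summary): wrap in-memory data
# as a GMT virtual file, hand its name to a module as if it were a file on
# disk, and compare the module's text output with a string built from the
# same arrays, e.g.:
#
#     with lib.virtualfile_from_vectors(x, y, z) as vfile:
#         lib.call_module("info", "{} ->{}".format(vfile, outfile.name))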
def test_virtualfile_from_matrix():
    """
    Test transforming a matrix to virtual file dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    shape = (7, 5)
    for dtype in dtypes:
        data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
        with clib.Session() as lib:
            with lib.virtualfile_from_matrix(data) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
            )
            expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds)
            assert output == expected


def test_virtualfile_from_matrix_slice():
    """
    Test transforming a slice of a larger array to virtual file dataset.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    shape = (10, 6)
    for dtype in dtypes:
        full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)
        rows = 5
        cols = 3
        data = full_data[:rows, :cols]
        with clib.Session() as lib:
            with lib.virtualfile_from_matrix(data) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T]
            )
            expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds)
            assert output == expected


def test_virtualfile_from_vectors_pandas():
    """
    Pass vectors to a dataset using pandas Series.
    """
    dtypes = "float32 float64 int32 int64 uint32 uint64".split()
    size = 13
    for dtype in dtypes:
        data = pd.DataFrame(
            data=dict(
                x=np.arange(size, dtype=dtype),
                y=np.arange(size, size * 2, 1, dtype=dtype),
                z=np.arange(size * 2, size * 3, 1, dtype=dtype),
            )
        )
        with clib.Session() as lib:
            with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile:
                with GMTTempFile() as outfile:
                    lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                    output = outfile.read(keep_tabs=True)
            bounds = "\t".join(
                [
                    "<{:.0f}/{:.0f}>".format(i.min(), i.max())
                    for i in (data.x, data.y, data.z)
                ]
            )
            expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
            assert output == expected


def test_virtualfile_from_vectors_arraylike():
    """
    Pass array-like vectors to a dataset.
    """
    size = 13
    x = list(range(0, size, 1))
    y = tuple(range(size, size * 2, 1))
    z = range(size * 2, size * 3, 1)
    with clib.Session() as lib:
        with lib.virtualfile_from_vectors(x, y, z) as vfile:
            with GMTTempFile() as outfile:
                lib.call_module("info", "{} ->{}".format(vfile, outfile.name))
                output = outfile.read(keep_tabs=True)
        bounds = "\t".join(
            ["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)]
        )
        expected = "<vector memory>: N = {}\t{}\n".format(size, bounds)
        assert output == expected


def test_extract_region_fails():
    """
    Check that extract region fails if nothing has been plotted.
    """
    Figure()
    with pytest.raises(GMTCLibError):
        with clib.Session() as lib:
            lib.extract_region()


def test_extract_region_two_figures():
    """
    Extract region should handle multiple figures existing at the same time.
    """
    # Make two figures before calling extract_region to make sure that it's
    # getting from the current figure, not the last figure.
    fig1 = Figure()
    region1 = np.array([0, 10, -20, -10])
    fig1.coast(region=region1, projection="M6i", frame=True, land="black")

    fig2 = Figure()
    fig2.basemap(region="US.HI+r5", projection="M6i", frame=True)

    # Activate the first figure and extract the region from it.
    # Use in a different session to avoid any memory problems.
    with clib.Session() as lib:
        lib.call_module("figure", "{} -".format(fig1._name))
    with clib.Session() as lib:
        wesn1 = lib.extract_region()
        npt.assert_allclose(wesn1, region1)

    # Now try it with the second one
    with clib.Session() as lib:
        lib.call_module("figure", "{} -".format(fig2._name))
    with clib.Session() as lib:
        wesn2 = lib.extract_region()
        npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))


def test_write_data_fails():
    """
    Check that write data raises an exception for non-zero return codes.
    """
    # It's hard to make the C API function fail without causing a Segmentation
    # Fault. Can't test this by giving a bad file name because if output=='',
    # GMT will just write to stdout and spaces are valid file names. Use a
    # mock instead just to exercise this part of the code.
    with clib.Session() as lib:
        with mock(lib, "GMT_Write_Data", returns=1):
            with pytest.raises(GMTCLibError):
                lib.write_data(
                    "GMT_IS_VECTOR",
                    "GMT_IS_POINT",
                    "GMT_WRITE_SET",
                    [1] * 6,
                    "some-file-name",
                    None,
                )
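# Note on the tests below (our summary): dataarray_to_matrix is expected to
# return the region as [xmin, xmax, ymin, ymax], report increments as
# positive numbers, and orient the matrix so that row 0 is the northernmost
# row; for a grid with increasing y the expected matrix is therefore
# np.flipud(data).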


def test_call_module():
    """
    Run a command to see if call_module works.
    """
    data_fname = os.path.join(TEST_DATA_DIR, "points.txt")
    with clib.Session() as lib:
        with GMTTempFile() as out_fname:
            lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name))
            assert os.path.exists(out_fname.name)
            output = out_fname.read().strip()
            assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338"


def test_call_module_invalid_arguments():
    """
    Fails for invalid module arguments.
    """
    with clib.Session() as lib:
        with pytest.raises(GMTCLibError):
            lib.call_module("info", "bogus-data.bla")


def test_call_module_invalid_name():
    """
    Fails when given bad input.
    """
    with clib.Session() as lib:
        with pytest.raises(GMTCLibError):
            lib.call_module("meh", "")


def test_call_module_error_message():
    """
    Check that the GMT error message is captured.
    """
    with clib.Session() as lib:
        try:
            lib.call_module("info", "bogus-data.bla")
        except GMTCLibError as error:
            assert "Module 'info' failed with status code" in str(error)
            assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error)


def test_method_no_session():
    """
    Fails when not in a session.
    """
    # Create an instance of Session without "with" so no session is created.
    lib = clib.Session()
    with pytest.raises(GMTCLibNoSessionError):
        lib.call_module("gmtdefaults", "")
    with pytest.raises(GMTCLibNoSessionError):
        lib.session_pointer  # pylint: disable=pointless-statement
\"\"\" data = np.diag(v=np.arange(3))", "pylint: disable=pointless-statement def test_parse_constant_single(): \"\"\" Parsing a single family argument", "uint64\".split() shape = (10, 6) for dtype in dtypes: full_data", ") data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) #", "Pass array-like vectors to a dataset. \"\"\" size = 13", "disable=pointless-statement def test_create_destroy_session(): \"\"\" Test that create and destroy session", "!= -99999 with pytest.raises(GMTCLibError): ses[\"A_WHOLE_LOT_OF_JUNK\"] # pylint: disable=pointless-statement def test_create_destroy_session():", "(which is a good thing because libgmt usually only fails", "\"\"\" # Make a 3D regular grid data = np.ones((10,", "= np.linspace(start=5, stop=9, num=3) grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\",", "y[0])]) def test_dataarray_to_matrix_dims_fails(): \"\"\" Check that it fails for >", "automation for transforming vectors to virtual file dataset. \"\"\" dtypes", "np.linspace(0, 1, 5) y = np.logspace(2, 3, 4) grid =", "->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(), col.max())", "\"GMT_IS_VECTOR\", \"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1] * 6, \"some-file-name\", None, ) def", "family argument correctly. \"\"\" lib = clib.Session() for family in", "is the GMT error message was captured. \"\"\" with clib.Session()", "\"\"\" Check that extract region fails if nothing has been", "this if by giving a bad file name because if", "output or non-zero status codes. Needed because it's not easy", "test_virtualfile_from_vectors_transpose(): \"\"\" Test transforming matrix columns to virtual file dataset.", "session are called without errors. \"\"\" # Create two session", "get to this code\") # Test the status check when", "def test_virtual_file(): \"\"\" Test passing in data via a virtual", "= os.path.join(os.path.dirname(__file__), \"data\") with clib.Session() as _lib: gmt_version = Version(_lib.info[\"version\"])", "dtypes = \"float32 float64 int32 int64 uint32 uint64\".split() shape =", "def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that dataarray_to_matrix returns correct output with", "\"points.txt\") out_fname = \"test_call_module.txt\" with clib.Session() as lib: with GMTTempFile()", "for arrays of different sizes. \"\"\" x = np.arange(5) y", "abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that dataarray_to_matrix returns", "stop=0, num=3) y = np.linspace(start=9, stop=5, num=3) grid = xr.DataArray(data,", "range(size * 2, size * 3, 1) with clib.Session() as", "two columns of string or object dtype into virtual file", "family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11, 10, 2, 0], ) def test_virtual_file():", "func, returns=None, mock_func=None): \"\"\" Mock a GMT C API function", "dim=[11, 10, 2, 0], ) def test_virtual_file(): \"\"\" Test passing", "None: def mock_api_function(*args): # pylint: disable=unused-argument \"\"\" A mock GMT", "{}\\t{}\\n\".format(size, bounds) assert output == expected @pytest.mark.parametrize(\"dtype\", [str, object]) def", "columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1],", "created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module(\"gmtdefaults\", \"\") with pytest.raises(GMTCLibNoSessionError):", "larger array to virtual file dataset. 
\"\"\" dtypes = \"float32", "dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as", "file # names. Use a mock instead just to exercise", "for i in (data.x, data.y, data.z) ] ) expected =", "GMTTempFile() as outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True) expected", "Can't test this if by giving a bad file name", "x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_dataarray_to_matrix_inc_fails(): \"\"\" Check that it", "\"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11, 10, 2, 0],", "with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): \"\"\" Parsing", "= \"\".join( f\"{h}\\t{i}\\t{j} {k}\\n\" for h, i, j, k in", "with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print(\"Should not get to this code\")", "invalid module arguments. \"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError):", "a single family argument correctly. \"\"\" lib = clib.Session() for", "if # output=='', GMT will just write to stdout and", "lib.call_module(\"info\", \"{} -C ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds =", "a larger array to virtual file dataset. \"\"\" dtypes =", "= np.arange(10) grid = xr.DataArray(data, coords=[(\"z\", z), (\"y\", y), (\"x\",", "7.8648 0.1412 0.9338\" def test_call_module_invalid_arguments(): \"\"\" Fails for invalid module", "ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): \"\"\" Check that an", "in data.T] ) expected = \"<matrix memory>: N = {}\\t{}\\n\".format(rows,", "with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print(\"This", "test_virtualfile_from_matrix(): \"\"\" Test transforming a matrix to virtual file dataset.", "into virtual file dataset. \"\"\" size = 5 x =", "mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): \"\"\" Return", "the same memory session1 = clib.Session() session1.create(name=\"test_session1\") assert session1.session_pointer is", "the same string def mock_defaults(api, name, value): # pylint: disable=unused-argument", "for variable increments. \"\"\" data = np.ones((4, 5), dtype=\"float64\") x", "as lib, mock(lib, \"GMT_Open_VirtualFile\", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print(\"Should", "mock_get_libgmt_func) yield setattr(session, \"get_libgmt_func\", get_libgmt_func) def test_getitem(): \"\"\" Test that", "cols = 3 data = full_data[:rows, :cols] with clib.Session() as", "fail badly. \"\"\" with clib.Session() as lib: # Dataset from", "!= -99999 assert ses[\"GMT_DOUBLE\"] != -99999 with pytest.raises(GMTCLibError): ses[\"A_WHOLE_LOT_OF_JUNK\"] #", "empty dictionary assert ses.info for key in ses.info: assert ses.info[key]", "assert lib.info[\"version\"] != \"5.4.3\" # Make sure the session is", "bounds) assert output == expected def test_virtual_file_fails(): \"\"\" Check that", "virtual file dataset. 
\"\"\" size = 5 x = np.arange(size,", "f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\" for i,", "\"gmtinfo [ERROR]: Cannot find file bogus-data.bla\" in str(error) def test_method_no_session():", "{k}\\n\" for h, i, j, k in zip(x, y, strings1,", "-10]) fig1.coast(region=region1, projection=\"M6i\", frame=True, land=\"black\") fig2 = Figure() fig2.basemap(region=\"US.HI+r5\", projection=\"M6i\",", "data = np.ones((10, 12, 11), dtype=\"float32\") x = np.arange(11) y", "(x, y, z)] ) expected = \"<vector memory>: N =", "create a session. \"\"\" ses = clib.Session() with mock(ses, \"GMT_Create_Session\",", "pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session()", "lib.extract_region() npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0])) def test_write_data_fails(): \"\"\" Check", "pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\") with clib.Session() as", "flipped y. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4,", "h, i, j, k in zip(x, y, strings1, strings2) )", "y, strings)) assert output == expected @pytest.mark.parametrize(\"dtype\", [str, object]) def", "0], # columns, rows, layers, dtype ) data = np.arange(shape[0]", "\"\"\" if name == func: return mock_func return get_libgmt_func(name, argtypes,", "data=dict( x=np.arange(size, dtype=dtype), y=np.arange(size, size * 2, 1, dtype=dtype), z=np.arange(size", "raises an exception for invalid names. \"\"\" with clib.Session() as", "def test_create_data_dataset(): \"\"\" Run the function to make sure it", "size * 2, 1, dtype=dtype) z = np.arange(size * 2,", "make sure that it's # getting from the current figure,", "lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): \"\"\" Parsing a single", "y[0])]) def test_dataarray_to_matrix_negative_x_and_y_increment(): \"\"\" Check that dataarray_to_matrix returns correct output", "exception for non-zero return codes. \"\"\" # It's hard to", "when failing to create a session. \"\"\" ses = clib.Session()", "= Version(_lib.info[\"version\"]) @contextmanager def mock(session, func, returns=None, mock_func=None): \"\"\" Mock", "!= session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session", "mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): \"\"\" Create", "later. 


def test_virtual_file_fails():
    """
    Check that opening and closing virtual files raises an exception for
    non-zero return codes.
    """
    vfargs = (
        "GMT_IS_DATASET|GMT_VIA_MATRIX",
        "GMT_IS_POINT",
        "GMT_IN|GMT_IS_REFERENCE",
        None,
    )

    # Mock Open_VirtualFile to test the status check when entering the
    # context. If the exception is raised, the code won't get to the closing
    # of the virtual file.
    with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1):
        with pytest.raises(GMTCLibError):
            with lib.open_virtual_file(*vfargs):
                print("Should not get to this code")

    # Test the status check when closing the virtual file. Mock the opening
    # to return 0 (success) so that we don't open a file that we won't close
    # later.
    with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock(
        lib, "GMT_Close_VirtualFile", returns=1
    ):
        with pytest.raises(GMTCLibError):
            with lib.open_virtual_file(*vfargs):
                pass
            print("Shouldn't get to this code either")


def test_virtual_file_bad_direction():
    """
    Test passing an invalid direction argument.
    """
    with clib.Session() as lib:
        vfargs = (
            "GMT_IS_DATASET|GMT_VIA_MATRIX",
            "GMT_IS_POINT",
            "GMT_IS_GRID",  # The invalid direction argument
            0,
        )
        with pytest.raises(GMTInvalidInput):
            with lib.open_virtual_file(*vfargs):
                print("This should have failed")
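
# The virtual-file pattern exercised above, in brief: put_matrix() attaches
# a numpy array to a GMT data container, open_virtual_file() exposes that
# container under an in-memory file name, and the name is then passed to a
# module (e.g. "info") exactly as if it were a path on disk.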
\"\"\" with clib.Session() as lib:", "fig2 = Figure() fig2.basemap(region=\"US.HI+r5\", projection=\"M6i\", frame=True) # Activate the first", "the exception is raised, the code won't get to the", "the second one with clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig2._name))", "out_fname: lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output =", "def mock_defaults(api, name, value): # pylint: disable=unused-argument \"\"\" Return an", "\"\".join(f\"{i}\\t{j}\\t{k}\\n\" for i, j, k in zip(x, y, strings)) assert", "dtype=dtype), y=np.arange(size, size * 2, 1, dtype=dtype), z=np.arange(size * 2,", "as lib: with GMTTempFile() as out_fname: lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname,", "setattr(session, \"get_libgmt_func\", get_libgmt_func) def test_getitem(): \"\"\" Test that I can", "output == expected def test_virtual_file_fails(): \"\"\" Check that opening and", "with lib.virtualfile_from_vectors(x, y): print(\"This should have failed\") def test_virtualfile_from_matrix(): \"\"\"", "correct output. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4,", "lib: with mock(lib, \"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11,", "as lib: with pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\") def test_call_module_invalid_name(): \"\"\" Fails", "error message was captured. \"\"\" with clib.Session() as lib: try:", "= lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], )", "= \"float32 float64 int32 int64 uint32 uint64\".split() shape = (10,", "dataarray_to_matrix returns correct output. \"\"\" data = np.diag(v=np.arange(3)) x =", "* shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as", "assert output == expected def test_virtualfile_from_matrix_slice(): \"\"\" Test transforming a", "-20, -10]) fig1.coast(region=region1, projection=\"M6i\", frame=True, land=\"black\") fig2 = Figure() fig2.basemap(region=\"US.HI+r5\",", "full_data[:rows, :cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile:", "TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\") with clib.Session() as _lib: gmt_version =", "with clib.Session() as lib: lib.extract_region() def test_extract_region_two_figures(): \"\"\" Extract region", "an empty dictionary assert ses.info for key in ses.info: assert", "working. \"\"\" # Check if there are no errors or", "clib.Session() as lib: try: lib.call_module(\"info\", \"bogus-data.bla\") except GMTCLibError as error:", "arguments. 
\"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\")", "value.value = b\"bla\" return 0 ses = clib.Session() ses.create(\"test-session\") with", ":cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with", "= [ \"SOME_random_STRING\", \"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\", \"GMT_IS_DATASET|NOT_A_PROPER_VIA\", \"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX\", \"NOT_A_PROPER_FAMILY|ALSO_INVALID\", ] for test_case", "contextmanager import numpy as np import numpy.testing as npt import", "= np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the", "as vfile: with GMTTempFile() as outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output", "exception is raised, the code won't get to the closing", "the clib.Session raises an exception if GMT is too old.", "for family, via in test_cases: composite = \"|\".join([family, via]) expected", "2, 1, dtype=dtype) z = np.arange(size * 2, size *", "returns a given value. \"\"\" return returns mock_func = mock_api_function", "\"GMT_IS_DATASET|NOT_A_PROPER_VIA\", \"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX\", \"NOT_A_PROPER_FAMILY|ALSO_INVALID\", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput):", "disable=protected-access \"\"\" Test the wrappers for the C API. \"\"\"", "sure get_default works without crashing and gives reasonable results. \"\"\"", "geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def", "\"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\", # The invalid direction argument 0, )", "test_dataarray_to_matrix_works(): \"\"\" Check that dataarray_to_matrix returns correct output. \"\"\" data", "strings1, strings2) as vfile: with GMTTempFile() as outfile: lib.call_module(\"convert\", f\"{vfile}", "= np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4, num=3) y = np.linspace(start=5,", "to a dataset. \"\"\" size = 13 x = list(range(0,", "( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IN|GMT_IS_REFERENCE\", None, ) # Mock Open_VirtualFile to", "\"GMT_Write_Data\", returns=1): with pytest.raises(GMTCLibError): lib.write_data( \"GMT_IS_VECTOR\", \"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1] *", "pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype), y=np.arange(size, size * 2, 1, dtype=dtype),", "nothing has been plotted. 
\"\"\" Figure() with pytest.raises(GMTCLibError): with clib.Session()", "test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should", "\"GMT_Close_VirtualFile\", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print(\"Shouldn't get", "* 2, size * 3, 1, dtype=dtype), ) ) with", "passing in two columns of string or object dtype into", "= xr.DataArray(data, coords=[(\"z\", z), (\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput):", "# pylint: disable=protected-access \"\"\" Test the wrappers for the C", "test_dataarray_to_matrix_negative_x_increment(): \"\"\" Check if dataarray_to_matrix returns correct output with flipped", "expected def test_virtual_file_fails(): \"\"\" Check that opening and closing virtual", "strings1, strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose(): \"\"\"", "to a virtual file and pass it along to gmt", "from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\") with clib.Session()", "float64 int32 int64 uint32 uint64\".split() size = 10 for dtype", "time. \"\"\" # Make two figures before calling extract_region to", "mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], # columns, rows, layers, dtype", "dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size *", "\"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4, num=3) y", "as lib: lib.call_module(\"figure\", \"{} -\".format(fig1._name)) with clib.Session() as lib: wesn1", "shape = (10, 6) for dtype in dtypes: full_data =", "ses.info: assert ses.info[key] == \"bla\" ses.destroy() def test_fails_for_wrong_version(): \"\"\" Make", "mock(ses, \"GMT_Create_Session\", returns=None): with pytest.raises(GMTCLibError): ses.create(\"test-session-name\") # Should fail if", "\"GMT_Open_VirtualFile\", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print(\"Should not get to", "lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2],", "in dtypes: with clib.Session() as lib: family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\" geometry", "size * 2, 1)) z = range(size * 2, size", "data.z) ] ) expected = \"<vector memory>: N = {}\\t{}\\n\".format(size,", "as lib: with mock(lib, \"GMT_Write_Data\", returns=1): with pytest.raises(GMTCLibError): lib.write_data( \"GMT_IS_VECTOR\",", "= ((family, via) for family in FAMILIES for via in", "\"klmno\"], dtype=dtype) strings2 = np.array([\"pqrst\", \"uvwx\", \"yz!\", \"@#\", \"$\"], dtype=dtype)", "codes. \"\"\" # It's hard to make the C API", "write to stdout and spaces are valid file # names.", "Test transforming matrix columns to virtual file dataset. 
\"\"\" dtypes", "mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, \"get_libgmt_func\", mock_get_libgmt_func) yield setattr(session,", "clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create(\"test-session\") with mock(ses, \"GMT_Destroy_Session\", returns=1): with", "lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print(\"This should have failed\")", "Test the automation for transforming vectors to virtual file dataset.", "rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)", "np.diag(v=np.arange(3)) x = np.linspace(start=0, stop=4, num=3) y = np.linspace(start=9, stop=5,", "y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]]) def", "get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): \"\"\" Return our", "fail without inducing a Segmentation Fault (which is a good", "\"bla\" ses.destroy() def test_fails_for_wrong_version(): \"\"\" Make sure the clib.Session raises", "mock(ses, \"GMT_Destroy_Session\", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): \"\"\"", "= range(size * 2, size * 3, 1) with clib.Session()", "5) y = np.logspace(2, 3, 4) grid = xr.DataArray(data, coords=[(\"y\",", "0.9338\" def test_call_module_invalid_arguments(): \"\"\" Fails for invalid module arguments. \"\"\"", "2, 1, dtype=np.int32) strings1 = np.array([\"a\", \"bc\", \"def\", \"ghij\", \"klmno\"],", "\"\"\" Check that write data raises an exception for non-zero", "just to exercise this part of the code. with clib.Session()", "- y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that dataarray_to_matrix returns correct", "variable increments. \"\"\" data = np.ones((4, 5), dtype=\"float64\") x =", "= np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size", "= {}\\t{}\\n\".format(size, bounds) assert output == expected def test_extract_region_fails(): \"\"\"", "= session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): \"\"\" Return our mock", "the context. # If the exception is raised, the code", "returns=None): with pytest.raises(GMTCLibError): ses.create(\"test-session-name\") # Should fail if trying to", "exercise this part of the code. with clib.Session() as lib:", "\"\"\" Run the function to make sure it doesn't fail", "crashing and gives reasonable results. \"\"\" with clib.Session() as lib:", "to destroy session when given bad input. \"\"\" ses =", "lib.call_module(\"meh\", \"\") def test_call_module_error_message(): \"\"\" Check is the GMT error", "rows, layers, dtype ) # Dataset from matrices data_matrix =", "in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create(\"session1\") assert", "x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.fliplr(data)) npt.assert_allclose(actual=region, desired=[x.min(),", "250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid", "# pylint: disable=unused-argument \"\"\" Return an old version. \"\"\" if", "Version(lib.get_default(\"API_VERSION\")) >= Version(\"6.2.0\") def test_get_default_fails(): \"\"\" Make sure get_default raises", "existing at the same time. 
\"\"\" # Make two figures", "\"5.4.3\" # Make sure the session is closed when the", "np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset", "z = range(size * 2, size * 3, 1) with", "them anyway. # This should work... lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=VIAS", "shape = (5, 3) for dtype in dtypes: with clib.Session()", "session when given bad input. \"\"\" ses = clib.Session() with", "in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data(", "returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11, 10, 2, 0], )", "sure get_default raises an exception for invalid names. \"\"\" with", "None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails():", "test_call_module_invalid_name(): \"\"\" Fails when given bad input. \"\"\" with clib.Session()", "Figure() fig2.basemap(region=\"US.HI+r5\", projection=\"M6i\", frame=True) # Activate the first figure and", "lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\",", "\"\\t\".join( [ \"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in (data.x, data.y, data.z)", "with clib.Session() as lib: family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\" geometry = \"GMT_IS_POINT\"", "size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size,", "\"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1] * 6, \"some-file-name\", None, ) def test_dataarray_to_matrix_works():", "1, 5) y = np.logspace(2, 3, 4) grid = xr.DataArray(data,", "with pytest.raises(GMTVersionError): with lib: assert lib.info[\"version\"] != \"5.4.3\" # Make", "dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.fliplr(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1]", "->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(i.min(), i.max())", "lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it with the second", "clib.Session() as lib: with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def test_info_dict(): \"\"\" Make", "as error: assert \"Module 'info' failed with status code\" in", "with flipped y. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=0,", "always the same string def mock_defaults(api, name, value): # pylint:", "lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module(\"gmtdefaults\", \"\") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer", "np.arange(size, size * 2, 1, dtype=np.int32) strings1 = np.array([\"a\", \"bc\",", "grid ignoring range and inc. 
\"\"\" with clib.Session() as lib:", "returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print(\"Shouldn't get to", "dtype in dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) rows", "lib.call_module(\"figure\", \"{} -\".format(fig1._name)) with clib.Session() as lib: wesn1 = lib.extract_region()", "dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) rows = 5", "pytest.raises(GMTCLibError): lib.write_data( \"GMT_IS_VECTOR\", \"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1] * 6, \"some-file-name\", None,", "clib.Session() session2.create(name=\"test_session2\") assert session2.session_pointer is not None assert session2.session_pointer !=", "Check is the GMT error message was captured. \"\"\" with", "region should handle multiple figures existing at the same time.", "that an exception is raised when failing to create a", "return 0 (success) so that we don't open a file", "current figure, not the last figure. fig1 = Figure() region1", "data.T] ) expected = \"<matrix memory>: N = {}\\t{}\\n\".format(shape[0], bounds)", "with clib.Session() as lib: # Grids from matrices using dim", "shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset():", "as outfile: lib.call_module(\"info\", \"{} -C ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True)", "dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import (", "in test_cases: composite = \"|\".join([family, via]) expected = lib[family] +", "grid data = np.ones((10, 12, 11), dtype=\"float32\") x = np.arange(11)", "# pylint: disable=pointless-statement def test_create_session_fails(): \"\"\" Check that an exception", "to create a session before destroying the old one. ses.create(\"test1\")", "\"{} ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(),", "\"\"\" size = 5 x = np.arange(size, dtype=np.int32) y =", "before calling extract_region to make sure that it's # getting", "def test_parse_constant_fails(): \"\"\" Check if the function fails when given", "with status code\" in str(error) assert \"gmtinfo [ERROR]: Cannot find", "# Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as", "lib: with mock(lib, \"GMT_Write_Data\", returns=1): with pytest.raises(GMTCLibError): lib.write_data( \"GMT_IS_VECTOR\", \"GMT_IS_POINT\",", "data_fname = os.path.join(TEST_DATA_DIR, \"points.txt\") out_fname = \"test_call_module.txt\" with clib.Session() as", "\"test_call_module.txt\" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module(\"info\",", "col in data.T] ) expected = \"<matrix memory>: N =", "for i, j, k in zip(x, y, strings)) assert output", "pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_get_default(): \"\"\" Make sure get_default works without", "the opening to return 0 (success) so that we don't", "\"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for col in data.T] ) expected =", "\"\"\" return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def", "expected def test_parse_constant_fails(): \"\"\" Check if the function fails when", "easy to get some API functions to fail without inducing", "the C API. 
\"\"\" import os from contextlib import contextmanager", "failed\") def test_virtualfile_from_matrix(): \"\"\" Test transforming a matrix to virtual", "np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array([\"a\", \"bc\",", "# Mock GMT_Get_Default to return an old version def mock_defaults(api,", "clib.Session raises an exception if GMT is too old. \"\"\"", "sure the clib.Session raises an exception if GMT is too", "expected def test_virtualfile_from_matrix_slice(): \"\"\" Test transforming a slice of a", "5), dtype=\"float64\") x = np.linspace(0, 1, 5) y = np.logspace(2,", "test_fails_for_wrong_version(): \"\"\" Make sure the clib.Session raises an exception if", "lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\",", "the clib.Session.info dict is working. \"\"\" # Check if there", "we won't close later. with clib.Session() as lib, mock(lib, \"GMT_Open_VirtualFile\",", "using range and int lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0, 250.0,", "\"\"\" Check that dataarray_to_matrix returns correct output with flipped y.", "matrix to virtual file dataset. \"\"\" dtypes = \"float32 float64", "by producing a NULL pointer as output or non-zero status", "with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print(\"Shouldn't get to this code", "thing because libgmt usually only fails with errors). \"\"\" if", "as lib: assert lib.get_default(\"API_GRID_LAYOUT\") in [\"rows\", \"columns\"] assert int(lib.get_default(\"API_CORES\")) >=", "z) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile,", "uint32 uint64\".split() shape = (7, 5) for dtype in dtypes:", "= np.array([0, 10, -20, -10]) fig1.coast(region=region1, projection=\"M6i\", frame=True, land=\"black\") fig2", "with clib.Session() as lib: with mock(lib, \"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\",", "argtypes, restype) setattr(session, \"get_libgmt_func\", mock_get_libgmt_func) yield setattr(session, \"get_libgmt_func\", get_libgmt_func) def", "] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS)", "lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as", "\"\") def test_call_module_error_message(): \"\"\" Check is the GMT error message", "np.linspace(start=4, stop=0, num=3) y = np.linspace(start=5, stop=9, num=3) grid =", "if GMT is too old. 
\"\"\" # Mock GMT_Get_Default to", "pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers", "[\"<{:.0f}/{:.0f}>\".format(col.min(), col.max()) for col in data.T] ) expected = \"<matrix", "geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_GRID\", geometry=\"Not_a_valid_geometry\",", "to this code\") # Test the status check when closing", "\"float32 float64 int32 int64 uint32 uint64\".split() size = 13 for", "when API functions fail by producing a NULL pointer as", "lib: with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def test_info_dict(): \"\"\" Make sure the", "\"\"\" ses = clib.Session() assert ses[\"GMT_SESSION_EXTERNAL\"] != -99999 assert ses[\"GMT_MODULE_CMD\"]", "Dataset from matrices data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10,", "and spaces are valid file # names. Use a mock", "->{outfile.name}\") output = outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\" for i, j,", "for an empty dictionary assert ses.info for key in ses.info:", "clib.Session() as _lib: gmt_version = Version(_lib.info[\"version\"]) @contextmanager def mock(session, func,", "failing to create a session. \"\"\" ses = clib.Session() with", "all of the # properties. with clib.Session() as lib: assert", "lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0])) def", ") # Dataset from matrices data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\",", "transforming a slice of a larger array to virtual file", "only fails with errors). \"\"\" if mock_func is None: def", "return codes. \"\"\" vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IN|GMT_IS_REFERENCE\", None,", "with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create(\"session1\") assert ses.session_pointer is", "== b\"API_VERSION\": value.value = b\"5.4.3\" else: value.value = b\"bla\" return", "data raises an exception for non-zero return codes. \"\"\" #", "for col in data.T] ) expected = \"{}\\n\".format(bounds) assert output", "# Use in a different session to avoid any memory", "250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): \"\"\" Check", "{}\\t{}\\n\".format(size, bounds) assert output == expected def test_virtualfile_from_vectors_arraylike(): \"\"\" Pass", "mock(session, func, returns=None, mock_func=None): \"\"\" Mock a GMT C API", "status check when entering the context. # If the exception", "for dtype in dtypes: data = pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype),", "\"\"\" value.value = b\"bla\" return 0 ses = clib.Session() ses.create(\"test-session\")", "Check for an empty dictionary assert ses.info for key in", ") expected = \"<vector memory>: N = {}\\t{}\\n\".format(size, bounds) assert", "function that always returns a given value. \"\"\" return returns", "disable=pointless-statement def test_create_session_fails(): \"\"\" Check that an exception is raised", "was captured. 
\"\"\" with clib.Session() as lib: try: lib.call_module(\"info\", \"bogus-data.bla\")", "\"\"\" dtypes = \"float32 float64 int32 int64 uint32 uint64\".split() size", "with clib.Session() as lib: # Dataset from vectors data_vector =", "4) grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput):", "gives reasonable results. \"\"\" with clib.Session() as lib: assert lib.get_default(\"API_GRID_LAYOUT\")", "pytest.raises(GMTCLibError): ses.create(\"test2\") def test_destroy_session_fails(): \"\"\" Fail to destroy session when", "as lib: try: lib.call_module(\"info\", \"bogus-data.bla\") except GMTCLibError as error: assert", "\"float32 float64 int32 int64 uint32 uint64\".split() shape = (7, 5)", "sure it doesn't fail badly. \"\"\" with clib.Session() as lib:", "as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile:", "clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig1._name)) with clib.Session() as lib:", "GMT API function that always returns a given value. \"\"\"", "clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as", "= dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flipud(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc,", "outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for", "import numpy.testing as npt import pandas as pd import pytest", "family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"Not_a_valid_mode\", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0,", "parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def", "\"\"\" size = 13 x = list(range(0, size, 1)) y", "\"\"\" Check that it fails for variable increments. \"\"\" data", "as npt import pandas as pd import pytest import xarray", "lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it", "pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\") def test_call_module_invalid_name(): \"\"\" Fails when given bad", "won't get to the closing of the # virtual file.", "with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile:", "# Add the dataset to a virtual file and pass", "* 2, 1, dtype=np.int32) strings1 = np.array([\"a\", \"bc\", \"def\", \"ghij\",", "for dtype in dtypes: x = np.arange(size, dtype=dtype) y =", "shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as vfile:", "functions fail by producing a NULL pointer as output or", "not given valid modifiers but is using them anyway. #", "argument (separated by |) correctly. \"\"\" lib = clib.Session() test_cases", "from the current figure, not the last figure. fig1 =", "lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): \"\"\" Run the", "for transforming vectors to virtual file dataset. \"\"\" dtypes =", "are called without errors. \"\"\" # Create two session and", "session2.create(name=\"test_session2\") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer", "an old version. \"\"\" if name == b\"API_VERSION\": value.value =", "= \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for col in data.T] ) expected", "the value buffer. 
\"\"\" value.value = b\"bla\" return 0 ses", "memory>: N = {}\\t{}\\n\".format(size, bounds) assert output == expected @pytest.mark.parametrize(\"dtype\",", "data = pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype), y=np.arange(size, size * 2,", "\"\"\" Fail to destroy session when given bad input. \"\"\"", "dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1))) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()])", "desired=np.flip(data, axis=(0, 1))) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1]", "\"{} -C ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join(", "np import numpy.testing as npt import pandas as pd import", "clib.Session() as lib: lib.extract_region() def test_extract_region_two_figures(): \"\"\" Extract region should", "lib: with pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\") def test_call_module_invalid_name(): \"\"\" Fails when", "= 3 data = full_data[:rows, :cols] with clib.Session() as lib:", "output == expected @pytest.mark.parametrize(\"dtype\", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): \"\"\" Test", "* 2, 1, dtype=dtype) z = np.arange(size * 2, size", "sure they are not pointing to the same memory session1", "# Dataset from matrices data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\",", "code either\") def test_virtual_file_bad_direction(): \"\"\" Test passing an invalid direction", "file dataset. \"\"\" dtypes = \"float32 float64 int32 int64 uint32", "= lib.create_data( family=family, geometry=geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[shape[1], shape[0], 1, 0], #", "getting all of the # properties. with clib.Session() as lib:", "0.1412 0.9338\" def test_call_module_invalid_arguments(): \"\"\" Fails for invalid module arguments.", "# But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=None", "f\"{h}\\t{i}\\t{j} {k}\\n\" for h, i, j, k in zip(x, y,", "Fails when given bad input. \"\"\" with clib.Session() as lib:", "as pd import pytest import xarray as xr from packaging.version", "in zip(x, y, strings1, strings2) ) assert output == expected", "get to this code either\") def test_virtual_file_bad_direction(): \"\"\" Test passing", "the code. with clib.Session() as lib: with mock(lib, \"GMT_Write_Data\", returns=1):", "invalid names. \"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\")", "z=np.arange(size * 2, size * 3, 1, dtype=dtype), ) )", "has been plotted. \"\"\" Figure() with pytest.raises(GMTCLibError): with clib.Session() as", "num=3) grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) matrix, region,", "\"\"\" Parsing a single family argument correctly. 
\"\"\" lib =", "Test that I can get correct constants from the C", "with lib.open_virtual_file(*vfargs): print(\"This should have failed\") def test_virtualfile_from_vectors(): \"\"\" Test", "x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flipud(data)) npt.assert_allclose(actual=region, desired=[x.min(),", "np.diag(v=np.arange(3)) x = np.linspace(start=4, stop=0, num=3) y = np.linspace(start=9, stop=5,", "# Make a 3D regular grid data = np.ones((10, 12,", "Segmentation # Fault. Can't test this if by giving a", "mock(lib, \"GMT_Open_VirtualFile\", returns=0), mock( lib, \"GMT_Close_VirtualFile\", returns=1 ): with pytest.raises(GMTCLibError):", "= \"\\t\".join( [ \"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in (data.x, data.y,", "of the # virtual file. with clib.Session() as lib, mock(lib,", "Test passing an invalid direction argument. \"\"\" with clib.Session() as", "with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile()", "\"\"\" with clib.Session() as lib: try: lib.call_module(\"info\", \"bogus-data.bla\") except GMTCLibError", "data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with", "1, 0], ) def test_create_data_grid_range(): \"\"\" Create a grid specifying", "def test_virtualfile_from_matrix(): \"\"\" Test transforming a matrix to virtual file", "= \"<matrix memory>: N = {}\\t{}\\n\".format(shape[0], bounds) assert output ==", "x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]])", "shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a", "def test_call_module(): \"\"\" Run a command to see if call_module", "outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\"", "\"GMT_Get_Default\", mock_func=mock_defaults): # Check for an empty dictionary assert ses.info", "\"\"\" Make sure get_default raises an exception for invalid names.", "the automation for transforming vectors to virtual file dataset. \"\"\"", "(\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0,", "int64 uint32 uint64\".split() shape = (10, 6) for dtype in", "a dataset using pandas Series. \"\"\" dtypes = \"float32 float64", "as _lib: gmt_version = Version(_lib.info[\"version\"]) @contextmanager def mock(session, func, returns=None,", "session. \"\"\" ses = clib.Session() with mock(ses, \"GMT_Create_Session\", returns=None): with", "bad input. \"\"\" lib = clib.Session() test_cases = [ \"SOME_random_STRING\",", "and int lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", ranges=[150.0, 250.0, -20.0, 20.0],", "= ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\", # The invalid direction argument", "dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype )", "\"\"\" A mock GMT API function that always returns a", "import pandas as pd import pytest import xarray as xr", "f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True) expected = \"\".join( f\"{h}\\t{i}\\t{j} {k}\\n\"", "before destroying the old one. ses.create(\"test1\") with pytest.raises(GMTCLibError): ses.create(\"test2\") def", "columns to virtual file dataset. 
\"\"\" dtypes = \"float32 float64", "import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import", "5) for dtype in dtypes: data = np.arange(shape[0] * shape[1],", "pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, \"GMT_Create_Data\",", "destroy a session twice ses = clib.Session() for __ in", "0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing", "returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): \"\"\" Run a", "\"\"\" Test passing in two columns of string or object", "with mock(ses, \"GMT_Create_Session\", returns=None): with pytest.raises(GMTCLibError): ses.create(\"test-session-name\") # Should fail", "that dataarray_to_matrix returns correct output. \"\"\" data = np.diag(v=np.arange(3)) x", "* 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x,", "matrix=data) # Add the dataset to a virtual file and", "= clib.Session() assert ses[\"GMT_SESSION_EXTERNAL\"] != -99999 assert ses[\"GMT_MODULE_CMD\"] != -99999", "= \"float32 float64 int32 int64 uint32 uint64\".split() shape = (7,", "test_virtualfile_from_vectors_diff_size(): \"\"\" Test the function fails for arrays of different", "* shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to", "strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose(): \"\"\" Test", "in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data(", "lib. \"\"\" ses = clib.Session() assert ses[\"GMT_SESSION_EXTERNAL\"] != -99999 assert", "to see if call_module works. \"\"\" data_fname = os.path.join(TEST_DATA_DIR, \"points.txt\")", "figure and extract the region from it # Use in", "Parsing a single family argument correctly. \"\"\" lib = clib.Session()", "out_fname = \"test_call_module.txt\" with clib.Session() as lib: with GMTTempFile() as", "wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it with", "'bla' in the value buffer. \"\"\" value.value = b\"bla\" return", "should have failed\") def test_virtualfile_from_vectors(): \"\"\" Test the automation for", "abs(y[1] - y[0])]) def test_dataarray_to_matrix_dims_fails(): \"\"\" Check that it fails", "\"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput):", "lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\",", "pylint: disable=unused-argument \"\"\" Return an old version. \"\"\" if name", "Needed because it's not easy to get some API functions", "os.path.join(TEST_DATA_DIR, \"points.txt\") out_fname = \"test_call_module.txt\" with clib.Session() as lib: with", "memory problems. with clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig1._name)) with", "name, value): # pylint: disable=unused-argument \"\"\" Return an old version.", "clib.Session() as lib, mock(lib, \"GMT_Open_VirtualFile\", returns=0), mock( lib, \"GMT_Close_VirtualFile\", returns=1", "int(lib.get_default(\"API_CORES\")) >= 1 assert Version(lib.get_default(\"API_VERSION\")) >= Version(\"6.2.0\") def test_get_default_fails(): \"\"\"", "outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in (x,", "(7, 5) for dtype in dtypes: data = np.arange(shape[0] *", "value buffer. 
\"\"\" value.value = b\"bla\" return 0 ses =", "size * 2, 1, dtype=np.int32) strings = np.array([\"a\", \"bc\", \"defg\",", "y = np.arange(size, size * 2, 1, dtype=np.int32) strings =", "test_virtualfile_from_vectors(): \"\"\" Test the automation for transforming vectors to virtual", "destroy session are called without errors. \"\"\" # Create two", "exception is raised when failing to create a session. \"\"\"", "desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_dims_fails(): \"\"\" Check", "# pylint: disable=pointless-statement def test_create_destroy_session(): \"\"\" Test that create and", "Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix", "def test_virtualfile_from_vectors(): \"\"\" Test the automation for transforming vectors to", "= \"GMT_IS_DATASET|GMT_VIA_MATRIX\" geometry = \"GMT_IS_POINT\" dataset = lib.create_data( family=family, geometry=geometry,", "with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create(\"test-session\") with mock(ses, \"GMT_Destroy_Session\", returns=1): with pytest.raises(GMTCLibError):", "open a file that # we won't close later. with", "test_create_session_fails(): \"\"\" Check that an exception is raised when failing", "matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1))) npt.assert_allclose(actual=region,", "outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\" for i, j, k in zip(x,", "inc=[0.1, 0.2], ) def test_create_data_fails(): \"\"\" Check that create_data raises", "= np.ones((10, 12, 11), dtype=\"float32\") x = np.arange(11) y =", "!= \"5.4.3\" # Make sure the session is closed when", "shape[0], 1, 0], # columns, rows, layers, dtype ) data", "check when entering the context. # If the exception is", "def test_parse_constant_composite(): \"\"\" Parsing a composite constant argument (separated by", "\"@#\", \"$\"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y,", "GMTTempFile() as outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True)", "functions to fail without inducing a Segmentation Fault (which is", "\"\"\" Check that an exception is raised when failing to", "= (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset) with lib.open_virtual_file(*vfargs) as vfile: with", "\"GMT_IS_POINT\", \"GMT_IN|GMT_IS_REFERENCE\", None, ) # Mock Open_VirtualFile to test the", "npt import pandas as pd import pytest import xarray as", "uint32 uint64\".split() shape = (5, 3) for dtype in dtypes:", "lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip()", "API function that always returns a given value. \"\"\" return", "when closing the virtual file # Mock the opening to", "ses.info[key] == \"bla\" ses.destroy() def test_fails_for_wrong_version(): \"\"\" Make sure the", "clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print(\"This should", "for h, i, j, k in zip(x, y, strings1, strings2)", "lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} -C", "correct constants from the C lib. \"\"\" ses = clib.Session()", "data.T] ) expected = \"{}\\n\".format(bounds) assert output == expected def", "\"\"\" Pass vectors to a dataset using pandas Series. 
\"\"\"", "np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session()", "dataset = lib.create_data( family=family, geometry=geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[shape[1], shape[0], 1, 0],", "size * 2, 1, dtype=np.int32) strings1 = np.array([\"a\", \"bc\", \"def\",", "returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None,", "\"{} -C ->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert", "with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_GRID\", geometry=\"Not_a_valid_geometry\", mode=\"GMT_CONTAINER_ONLY\",", "info vfargs = (family, geometry, \"GMT_IN|GMT_IS_REFERENCE\", dataset) with lib.open_virtual_file(*vfargs) as", "\"defg\", \"hijklmn\", \"opqrst\"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x,", "npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] - y[0]]) def test_dataarray_to_matrix_negative_x_increment(): \"\"\"", "lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family,", "that exceptions are raised when API functions fail by producing", "5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size *", "xr.DataArray(data, coords=[(\"z\", z), (\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid)", "Dataset from vectors data_vector = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_VECTOR\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10,", "xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_get_default():", "float64 int32 int64 uint32 uint64\".split() size = 13 for dtype", "[ERROR]: Cannot find file bogus-data.bla\" in str(error) def test_method_no_session(): \"\"\"", "clib.Session() as lib: assert lib.get_default(\"API_GRID_LAYOUT\") in [\"rows\", \"columns\"] assert int(lib.get_default(\"API_CORES\"))", "test_call_module(): \"\"\" Run a command to see if call_module works.", "size = 13 x = list(range(0, size, 1)) y =", "dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flipud(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[x[1]", "outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output = outfile.read(keep_tabs=True) expected = \"\".join(", "rows = 5 cols = 3 data = full_data[:rows, :cols]", "lib.call_module(\"figure\", \"{} -\".format(fig2._name)) with clib.Session() as lib: wesn2 = lib.extract_region()", "Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES,", "= np.arange(11) y = np.arange(12) z = np.arange(10) grid =", "C API function fail without causing a Segmentation # Fault.", "ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in", "-C ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(),", "# pylint: disable=pointless-statement def test_parse_constant_single(): \"\"\" Parsing a single family", "x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_get_default(): \"\"\" Make sure get_default", "mock_func=None): \"\"\" Mock a GMT C API function to make", "raises an exception for non- zero return codes. \"\"\" vfargs", "using pandas Series. 
\"\"\" dtypes = \"float32 float64 int32 int64", "giving a bad file name because if # output=='', GMT", "pylint: disable=protected-access \"\"\" Test the wrappers for the C API.", "flipped x/y. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=4, stop=0,", "test_create_data_grid_dim(): \"\"\" Create a grid ignoring range and inc. \"\"\"", "\"\"\" Mock a GMT C API function to make it", "is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() #", "session is closed when the exception is raised. with pytest.raises(GMTCLibNoSessionError):", "3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y,", "( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile", "= np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1,", "output == \"11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338\" def test_call_module_invalid_arguments():", ") with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as", "using dim lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0],", "desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check", "Create and destroy a session twice ses = clib.Session() for", "dtypes: data = pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype), y=np.arange(size, size *", "lib = clib.Session() test_cases = [ \"SOME_random_STRING\", \"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\", \"GMT_IS_DATASET|NOT_A_PROPER_VIA\", \"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX\",", "\"yz!\", \"@#\", \"$\"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x,", "bounds = \"\\t\".join( [ \"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in (data.x,", "= outfile.read(keep_tabs=True) expected = \"\".join(f\"{i}\\t{j}\\t{k}\\n\" for i, j, k in", "-150.0, 15.0, 25.0])) def test_write_data_fails(): \"\"\" Check that write data", "y), (\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flipud(data))", "lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): \"\"\" Parsing", "if dataarray_to_matrix returns correct output with flipped x. \"\"\" data", "outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds =", "passing an invalid direction argument. \"\"\" with clib.Session() as lib:", "\"\"\" Check that opening and closing virtual files raises an", "in two columns of string or object dtype into virtual", "Figure() region1 = np.array([0, 10, -20, -10]) fig1.coast(region=region1, projection=\"M6i\", frame=True,", "region from it # Use in a different session to", "= xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def", "xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid)", "the C lib. \"\"\" ses = clib.Session() assert ses[\"GMT_SESSION_EXTERNAL\"] !=", "data_vector != data_matrix def test_create_data_grid_dim(): \"\"\" Create a grid ignoring", "also fail if not given valid modifiers but is using", "strings)) assert output == expected @pytest.mark.parametrize(\"dtype\", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype):", "status codes. 
Needed because it's not easy to get some", "as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with GMTTempFile()", "dtype=\"float32\") x = np.arange(11) y = np.arange(12) z = np.arange(10)", "different session to avoid any memory problems. with clib.Session() as", "= \"<matrix memory>: N = {}\\t{}\\n\".format(rows, bounds) assert output ==", "\"bogus-data.bla\") def test_call_module_invalid_name(): \"\"\" Fails when given bad input. \"\"\"", "if there are no errors or segfaults from getting all", "errors or segfaults from getting all of the # properties.", "virtual file. with clib.Session() as lib, mock(lib, \"GMT_Open_VirtualFile\", returns=1): with", "session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a", "non-zero return codes. \"\"\" # It's hard to make the", "= clib.Session() session2.create(name=\"test_session2\") assert session2.session_pointer is not None assert session2.session_pointer", "= clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module(\"gmtdefaults\", \"\") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer #", "clib.Session() as lib: with mock(lib, \"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\",", "context. # If the exception is raised, the code won't", "sure that it's # getting from the current figure, not", "size, 1)) y = tuple(range(size, size * 2, 1)) z", "with a Dataset. \"\"\" dtypes = \"float32 float64 int32 int64", "pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions", "sure the session is closed when the exception is raised.", "stop=0, num=3) y = np.linspace(start=5, stop=9, num=3) grid = xr.DataArray(data,", "dataarray_to_matrix(grid) def test_dataarray_to_matrix_inc_fails(): \"\"\" Check that it fails for variable", "if trying to create a session before destroying the old", "Now try it with the second one with clib.Session() as", "packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion", "VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, )", "get correct constants from the C lib. 
\"\"\" ses =", "\"\"\" lib = clib.Session() test_cases = ((family, via) for family", "to make sure that it's # getting from the current", "= np.linspace(start=4, stop=0, num=3) y = np.linspace(start=5, stop=9, num=3) grid", "3 data = full_data[:rows, :cols] with clib.Session() as lib: with", "= (7, 5) for dtype in dtypes: data = np.arange(shape[0]", "\"GMT_IN|GMT_IS_REFERENCE\", None, ) # Mock Open_VirtualFile to test the status", "def test_destroy_session_fails(): \"\"\" Fail to destroy session when given bad", "data.y, data.z) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{}", "dtype=dtype) z = np.arange(size * 2, size * 3, 1,", "size * 2, 1, dtype=dtype), z=np.arange(size * 2, size *", "with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): \"\"\" Check", "as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with", "assert lib.get_default(\"API_GRID_LAYOUT\") in [\"rows\", \"columns\"] assert int(lib.get_default(\"API_CORES\")) >= 1 assert", "in ses.info: assert ses.info[key] == \"bla\" ses.destroy() def test_fails_for_wrong_version(): \"\"\"", "ses.destroy() ses.create(\"test-session\") with mock(ses, \"GMT_Destroy_Session\", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy()", "Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib:", "mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None):", "in FAMILIES for via in VIAS) for family, via in", "as lib: vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\", # The", "pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print(\"Should not get to this code\") #", "parsed == expected def test_parse_constant_fails(): \"\"\" Check if the function", "(\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_dataarray_to_matrix_inc_fails(): \"\"\"", "xr from packaging.version import Version from pygmt import Figure, clib", "with flipped x/y. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=4,", "not easy to get some API functions to fail without", "opening and closing virtual files raises an exception for non-", "\"SOME_random_STRING\", \"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\", \"GMT_IS_DATASET|NOT_A_PROPER_VIA\", \"NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX\", \"NOT_A_PROPER_FAMILY|ALSO_INVALID\", ] for test_case in test_cases:", "with lib.open_virtual_file(*vfargs): print(\"Should not get to this code\") # Test", "Mock a GMT C API function to make it always", "vectors to a dataset. \"\"\" size = 13 x =", "z = np.arange(10) grid = xr.DataArray(data, coords=[(\"z\", z), (\"y\", y),", "# If the exception is raised, the code won't get", "int64 uint32 uint64\".split() size = 13 for dtype in dtypes:", "pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print(\"Shouldn't get to this code either\")", "when given bad input. \"\"\" with clib.Session() as lib: with", "test_call_module_invalid_arguments(): \"\"\" Fails for invalid module arguments. 
\"\"\" with clib.Session()", "y=np.arange(size, size * 2, 1, dtype=dtype), z=np.arange(size * 2, size", "test_destroy_session_fails(): \"\"\" Fail to destroy session when given bad input.", "as out_fname: lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output", "to fail without inducing a Segmentation Fault (which is a", "= full_data[:rows, :cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as", "pylint: disable=unused-argument \"\"\" Put 'bla' in the value buffer. \"\"\"", "Grids from matrices using dim lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[10,", "with clib.Session() as lib: lib.call_module(\"figure\", \"{} -\".format(fig2._name)) with clib.Session() as", "version. \"\"\" if name == b\"API_VERSION\": value.value = b\"5.4.3\" else:", "* 2, 1)) z = range(size * 2, size *", "ses.create(\"test-session\") with mock(ses, \"GMT_Get_Default\", mock_func=mock_defaults): # Check for an empty", "multiple figures existing at the same time. \"\"\" # Make", "for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) #", "test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail", "clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint:", "Mock Open_VirtualFile to test the status check when entering the", "ses.destroy() def test_fails_for_wrong_version(): \"\"\" Make sure the clib.Session raises an", "x = np.arange(5) y = np.arange(6) with clib.Session() as lib:", "is None: def mock_api_function(*args): # pylint: disable=unused-argument \"\"\" A mock", "names. \"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def", "that it's # getting from the current figure, not the", "dtype ) # Dataset from matrices data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\",", "for invalid module arguments. \"\"\" with clib.Session() as lib: with", "fails if nothing has been plotted. \"\"\" Figure() with pytest.raises(GMTCLibError):", "0], ) def test_virtual_file(): \"\"\" Test passing in data via", "= b\"bla\" return 0 ses = clib.Session() ses.create(\"test-session\") with mock(ses,", "Parsing a composite constant argument (separated by |) correctly. \"\"\"", "= clib.Session() ses.create(\"test-session\") with mock(ses, \"GMT_Get_Default\", mock_func=mock_defaults): # Check for", "same time. \"\"\" # Make two figures before calling extract_region", "lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed", "GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), \"data\")", "test_create_data_fails(): \"\"\" Check that create_data raises exceptions for invalid input", "an exception for non- zero return codes. \"\"\" vfargs =", "arrays of different sizes. 
\"\"\" x = np.arange(5) y =", "10, 2, 0], ) def test_virtual_file(): \"\"\" Test passing in", "lib.create_data( family=family, geometry=geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[shape[1], shape[0], 1, 0], # columns,", "npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1))) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc,", "is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement", "fails for variable increments. \"\"\" data = np.ones((4, 5), dtype=\"float64\")", "assert output == \"11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338\" def", "input. \"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module(\"meh\", \"\")", "float64 int32 int64 uint32 uint64\".split() shape = (5, 3) for", "been plotted. \"\"\" Figure() with pytest.raises(GMTCLibError): with clib.Session() as lib:", "+ lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed ==", "pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family=\"GMT_IS_GRID\", geometry=\"Not_a_valid_geometry\", mode=\"GMT_CONTAINER_ONLY\", dim=[0,", "def test_virtual_file_bad_direction(): \"\"\" Test passing an invalid direction argument. \"\"\"", "output = outfile.read(keep_tabs=True) expected = \"\".join( f\"{h}\\t{i}\\t{j} {k}\\n\" for h,", "frame=True) # Activate the first figure and extract the region", "given value. Used to test that exceptions are raised when", ") expected = \"{}\\n\".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size():", ") ) with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z)", "dim=[10, 20, 1, 0], # columns, rows, layers, dtype )", "exception for non- zero return codes. \"\"\" vfargs = (", "producing a NULL pointer as output or non-zero status codes.", "15.0, 25.0])) def test_write_data_fails(): \"\"\" Check that write data raises", "def test_virtualfile_from_vectors_one_string_or_object_column(dtype): \"\"\" Test passing in one column with string", "is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module(\"gmtdefaults\", \"\") with", "\"\"\" dtypes = \"float32 float64 int32 int64 uint32 uint64\".split() shape", "input. \"\"\" lib = clib.Session() test_cases = [ \"SOME_random_STRING\", \"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\",", "with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\") def test_call_module_invalid_name():", ") def test_create_data_dataset(): \"\"\" Run the function to make sure", "output == expected def test_virtualfile_from_vectors_transpose(): \"\"\" Test transforming matrix columns", "fail by producing a NULL pointer as output or non-zero", "zip(x, y, strings1, strings2) ) assert output == expected def", "0.2], ) # If the data pointer returned is None", "\"\"\" Test passing an invalid direction argument. \"\"\" with clib.Session()", "Test that create and destroy session are called without errors.", "\"{} -\".format(fig2._name)) with clib.Session() as lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2,", "Should fail if trying to create a session before destroying", "25.0])) def test_write_data_fails(): \"\"\" Check that write data raises an", "name, value): # pylint: disable=unused-argument \"\"\" Put 'bla' in the", "dataset. 
\"\"\" dtypes = \"float32 float64 int32 int64 uint32 uint64\".split()", "either\") def test_virtual_file_bad_direction(): \"\"\" Test passing an invalid direction argument.", "# Dataset from vectors data_vector = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_VECTOR\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\",", "\"\"\" Create a grid specifying range and inc instead of", "= dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.fliplr(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc,", "clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module(\"gmtdefaults\", \"\") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint:", "npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0])) def test_write_data_fails(): \"\"\" Check that", "in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size", "Test passing in one column with string or object dtype", "b\"5.4.3\" else: value.value = b\"bla\" return 0 lib = clib.Session()", "exceptions are raised when API functions fail by producing a", "return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name,", "not pointing to the same memory session1 = clib.Session() session1.create(name=\"test_session1\")", "the wrappers for the C API. \"\"\" import os from", "the function fails when given bad input. \"\"\" lib =", ") def test_dataarray_to_matrix_works(): \"\"\" Check that dataarray_to_matrix returns correct output.", "a different session to avoid any memory problems. with clib.Session()", "properties. with clib.Session() as lib: assert lib.info # Mock GMT_Get_Default", "region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flipud(data)) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(),", "size = 10 for dtype in dtypes: x = np.arange(size,", "for > 2 dims. \"\"\" # Make a 3D regular", "argument. \"\"\" with clib.Session() as lib: vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\",", "assert output == expected def test_extract_region_fails(): \"\"\" Check that extract", "y), (\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flip(data,", "a given value. Used to test that exceptions are raised", "ses.destroy() ses.destroy() def test_call_module(): \"\"\" Run a command to see", "won't close later. 
with clib.Session() as lib, mock(lib, \"GMT_Open_VirtualFile\", returns=0),", "pytest.raises(GMTCLibError): ses.create(\"test-session-name\") # Should fail if trying to create a", "vfile: with GMTTempFile() as outfile: lib.call_module(\"convert\", f\"{vfile} ->{outfile.name}\") output =", "as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{} -C ->{}\".format(vfile,", "desired=data) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]),", "opening to return 0 (success) so that we don't open", "Test passing in data via a virtual file with a", "as lib: lib.extract_region() def test_extract_region_two_figures(): \"\"\" Extract region should handle", "= np.arange(5) y = np.arange(6) with clib.Session() as lib: with", "setattr(session, \"get_libgmt_func\", mock_get_libgmt_func) yield setattr(session, \"get_libgmt_func\", get_libgmt_func) def test_getitem(): \"\"\"", "\"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\") def", "is raised when failing to create a session. \"\"\" ses", "layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset,", "old. \"\"\" # Mock GMT_Get_Default to return an old version", "float64 int32 int64 uint32 uint64\".split() shape = (7, 5) for", "to avoid any memory problems. with clib.Session() as lib: lib.call_module(\"figure\",", "# pylint: disable=unused-argument \"\"\" Put 'bla' in the value buffer.", "key in ses.info: assert ses.info[key] == \"bla\" ses.destroy() def test_fails_for_wrong_version():", "npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1]", "list(range(0, size, 1)) y = tuple(range(size, size * 2, 1))", "lib: assert lib.info # Mock GMT_Get_Default to return always the", "with lib: assert lib.info[\"version\"] != \"5.4.3\" # Make sure the", "API functions fail by producing a NULL pointer as output", "range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create(\"session1\") assert ses.session_pointer", "np.linspace(start=0, stop=4, num=3) y = np.linspace(start=9, stop=5, num=3) grid =", "projection=\"M6i\", frame=True, land=\"black\") fig2 = Figure() fig2.basemap(region=\"US.HI+r5\", projection=\"M6i\", frame=True) #", "is closed when the exception is raised. with pytest.raises(GMTCLibNoSessionError): assert", "np.ones((4, 5), dtype=\"float64\") x = np.linspace(0, 1, 5) y =", "for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed", "columns of string or object dtype into virtual file dataset.", "data.y, data.z) ] ) expected = \"<vector memory>: N =", "test_cases: composite = \"|\".join([family, via]) expected = lib[family] + lib[via]", "Add the dataset to a virtual file and pass it", "2 dims. \"\"\" # Make a 3D regular grid data", "= outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for i in", "# Mock GMT_Get_Default to return always the same string def", "= lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails():", "(10, 6) for dtype in dtypes: full_data = np.arange(shape[0] *", "Extract region should handle multiple figures existing at the same", "code. 
with clib.Session() as lib: with mock(lib, \"GMT_Write_Data\", returns=1): with", "{}\\t{}\\n\".format(size, bounds) assert output == expected def test_extract_region_fails(): \"\"\" Check", "numpy.testing as npt import pandas as pd import pytest import", "\"\"\" with clib.Session() as lib: # Dataset from vectors data_vector", "pass it along to gmt info vfargs = (family, geometry,", "= {}\\t{}\\n\".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice(): \"\"\"", "= \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in (x, y, z)]", "with clib.Session() as lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2, np.array([-165.0, -150.0,", "bounds) assert output == expected def test_virtualfile_from_vectors_pandas(): \"\"\" Pass vectors", "0 ses = clib.Session() ses.create(\"test-session\") with mock(ses, \"GMT_Get_Default\", mock_func=mock_defaults): #", "codes. \"\"\" vfargs = ( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IN|GMT_IS_REFERENCE\", None, )", "np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_matrix(data)", "Run the function to make sure it doesn't fail badly.", "valid_modifiers=None ) def test_create_data_dataset(): \"\"\" Run the function to make", "= lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it with the", "lib.call_module(\"info\", \"bogus-data.bla\") def test_call_module_invalid_name(): \"\"\" Fails when given bad input.", "def test_call_module_invalid_name(): \"\"\" Fails when given bad input. \"\"\" with", "clib.Session() as lib: lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"Not_a_valid_mode\", dim=[0, 0, 1,", "0.2], ) def test_create_data_fails(): \"\"\" Check that create_data raises exceptions", "raises an exception for non-zero return codes. \"\"\" # It's", "value. \"\"\" return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func", "\"\"\" data_fname = os.path.join(TEST_DATA_DIR, \"points.txt\") out_fname = \"test_call_module.txt\" with clib.Session()", "when given bad input. \"\"\" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError):", "dtypes = \"float32 float64 int32 int64 uint32 uint64\".split() size =", "dtype=dtype), z=np.arange(size * 2, size * 3, 1, dtype=dtype), )", "mock_defaults(api, name, value): # pylint: disable=unused-argument \"\"\" Put 'bla' in", "because it's not easy to get some API functions to", "a NULL pointer as output or non-zero status codes. Needed", "with pytest.raises(GMTCLibError): ses.create(\"test-session-name\") # Should fail if trying to create", "desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[x[1] - x[0], y[1] -", "in dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) rows =", "( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", \"GMT_IS_POINT\", \"GMT_IS_GRID\", # The invalid direction argument 0,", "\"{} ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i),", "function fails when given bad input. 
\"\"\" lib = clib.Session()", "== expected def test_virtual_file_fails(): \"\"\" Check that opening and closing", "int64 uint32 uint64\".split() shape = (7, 5) for dtype in", "geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11, 10, 2, 0], ) def test_virtual_file(): \"\"\"", "with mock(lib, \"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11, 10,", "as outfile: lib.call_module(\"info\", \"{} ->{}\".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds", "mock(lib, \"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[11, 10, 2,", "the old one. ses.create(\"test1\") with pytest.raises(GMTCLibError): ses.create(\"test2\") def test_destroy_session_fails(): \"\"\"", "y), (\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.fliplr(data))", "Check that dataarray_to_matrix returns correct output with flipped y. \"\"\"", "\"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(), col.max()) for col in data.T] ) expected =", "the status check when entering the context. # If the", "mode=\"GMT_CONTAINER_ONLY\", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype", "3D regular grid data = np.ones((10, 12, 11), dtype=\"float32\") x", "Fails for invalid module arguments. \"\"\" with clib.Session() as lib:", "with flipped x. \"\"\" data = np.diag(v=np.arange(3)) x = np.linspace(start=4,", "outfile.read(keep_tabs=True) bounds = \"\\t\".join( [ \"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in", "= outfile.read(keep_tabs=True) expected = \"\".join( f\"{h}\\t{i}\\t{j} {k}\\n\" for h, i,", "size * 3, 1, dtype=dtype) with clib.Session() as lib: with", "in a session. \"\"\" # Create an instance of Session", "output. \"\"\" # Passing in invalid mode with pytest.raises(GMTInvalidInput): with", "virtual files raises an exception for non- zero return codes.", "session to avoid any memory problems. with clib.Session() as lib:", "npt.assert_allclose(wesn1, region1) # Now try it with the second one", "see if call_module works. \"\"\" data_fname = os.path.join(TEST_DATA_DIR, \"points.txt\") out_fname", "that dataarray_to_matrix returns correct output with flipped y. \"\"\" data", "test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that dataarray_to_matrix returns correct output with flipped", "i.max()) for i in (x, y, z)] ) expected =", "projection=\"M6i\", frame=True) # Activate the first figure and extract the", "memory>: N = {}\\t{}\\n\".format(rows, bounds) assert output == expected def", "None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and", "__ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create(\"session1\")", "y), (\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=data)", "Check that an exception is raised when failing to create", "valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): \"\"\" Check", "to virtual file dataset. \"\"\" dtypes = \"float32 float64 int32", "of the # properties. 
with clib.Session() as lib: assert lib.info", "-C ->{}\".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output", "so that we don't open a file that # we", "ses = clib.Session() ses.create(\"test-session\") with mock(ses, \"GMT_Get_Default\", mock_func=mock_defaults): # Check", "= clib.Session() test_cases = ((family, via) for family in FAMILIES", "col.max()) for col in data.T] ) expected = \"{}\\n\".format(bounds) assert", "test_write_data_fails(): \"\"\" Check that write data raises an exception for", "close later. with clib.Session() as lib, mock(lib, \"GMT_Open_VirtualFile\", returns=0), mock(", "= outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(col.min(), col.max()) for col in", ") expected = \"<matrix memory>: N = {}\\t{}\\n\".format(rows, bounds) assert", "= \"float32 float64 int32 int64 uint32 uint64\".split() size = 13", "np.linspace(start=4, stop=0, num=3) y = np.linspace(start=9, stop=5, num=3) grid =", "== expected @pytest.mark.parametrize(\"dtype\", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): \"\"\" Test passing", "as lib: # Grids from matrices using range and int", "API function fail without causing a Segmentation # Fault. Can't", "test that exceptions are raised when API functions fail by", "the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError):", "from matrices using dim lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20,", "\"\"\" Test that create and destroy session are called without", "uint64\".split() shape = (7, 5) for dtype in dtypes: data", "uint64\".split() size = 13 for dtype in dtypes: data =", "ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer", "clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile:", "== lib[family] def test_parse_constant_composite(): \"\"\" Parsing a composite constant argument", "if the function fails when given bad input. \"\"\" lib", "with pytest.raises(GMTCLibError): lib.call_module(\"info\", \"bogus-data.bla\") def test_call_module_invalid_name(): \"\"\" Fails when given", "y[0])]) def test_dataarray_to_matrix_negative_y_increment(): \"\"\" Check that dataarray_to_matrix returns correct output", "argtypes=None, restype=None): \"\"\" Return our mock function. 
\"\"\" if name", "os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == \"11.5309 61.7074 -2.9289", "as lib: with mock(lib, \"GMT_Create_Data\", returns=None): lib.create_data( family=\"GMT_IS_DATASET\", geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\",", "geometry=\"GMT_IS_SURFACE\", mode=\"Not_a_valid_mode\", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0],", "code\") # Test the status check when closing the virtual", "pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def test_info_dict(): \"\"\" Make sure the clib.Session.info dict", "layers, dtype ) # Dataset from matrices data_matrix = lib.create_data(", "family=family, geometry=geometry, mode=\"GMT_CONTAINER_ONLY\", dim=[shape[1], shape[0], 1, 0], # columns, rows,", "test_virtualfile_from_vectors_two_string_or_object_columns(dtype): \"\"\" Test passing in two columns of string or", "x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])])", "= np.logspace(2, 3, 4) grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\",", "6) for dtype in dtypes: full_data = np.arange(shape[0] * shape[1],", "def mock_defaults(api, name, value): # pylint: disable=unused-argument \"\"\" Put 'bla'", "1, dtype=dtype) z = np.arange(size * 2, size * 3,", "Mock the opening to return 0 (success) so that we", "'info' failed with status code\" in str(error) assert \"gmtinfo [ERROR]:", "mode=\"Not_a_valid_mode\", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1,", "a file that # we won't close later. with clib.Session()", "they are not pointing to the same memory session1 =", "GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR =", "closing virtual files raises an exception for non- zero return", "the GMT error message was captured. \"\"\" with clib.Session() as", "bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(i.min(), i.max()) for i in (x, y,", "# Grids from matrices using range and int lib.create_data( family=\"GMT_IS_GRID|GMT_VIA_MATRIX\",", "errors). \"\"\" if mock_func is None: def mock_api_function(*args): # pylint:", "x = np.arange(11) y = np.arange(12) z = np.arange(10) grid", "lib: lib.extract_region() def test_extract_region_two_figures(): \"\"\" Extract region should handle multiple", "- x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_dims_fails(): \"\"\" Check that", "\"float32 float64 int32 int64 uint32 uint64\".split() shape = (10, 6)", "raised when API functions fail by producing a NULL pointer", "get_default works without crashing and gives reasonable results. \"\"\" with", "in (data.x, data.y, data.z) ] ) expected = \"<vector memory>:", "are valid file # names. Use a mock instead just", "outfile.name)) output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for", "with mock(ses, \"GMT_Destroy_Session\", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module():", "\"\"\" Test that I can get correct constants from the", "raised when failing to create a session. \"\"\" ses =", "closed when the exception is raised. with pytest.raises(GMTCLibNoSessionError): assert lib.session_pointer", "returns correct output with flipped y. \"\"\" data = np.diag(v=np.arange(3))", "\"GMT_WRITE_SET\", [1] * 6, \"some-file-name\", None, ) def test_dataarray_to_matrix_works(): \"\"\"", "a matrix to virtual file dataset. \"\"\" dtypes = \"float32", "at the same time. 
\"\"\" # Make two figures before", "an exception is raised when failing to create a session.", "def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): \"\"\" Test passing in two columns of string", "for invalid names. \"\"\" with clib.Session() as lib: with pytest.raises(GMTCLibError):", "for i in (x, y, z)] ) expected = \"<vector", "lib: with GMTTempFile() as out_fname: lib.call_module(\"info\", \"{} -C ->{}\".format(data_fname, out_fname.name))", "\"with\" so no session is created. lib = clib.Session() with", "return always the same string def mock_defaults(api, name, value): #", "= clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES)", "\"NOT_A_PROPER_FAMILY|ALSO_INVALID\", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES,", "\"\"\" x = np.arange(5) y = np.arange(6) with clib.Session() as", "# If the data pointer returned is None (NULL pointer)", "\"\"\" lib = clib.Session() test_cases = [ \"SOME_random_STRING\", \"GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR\", \"GMT_IS_DATASET|NOT_A_PROPER_VIA\",", "virtual file and pass it along to gmt info vfargs", "sizes. \"\"\" x = np.arange(5) y = np.arange(6) with clib.Session()", "y = np.arange(6) with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with", "- x[0], y[1] - y[0]]) def test_dataarray_to_matrix_negative_x_increment(): \"\"\" Check if", "np.array([\"pqrst\", \"uvwx\", \"yz!\", \"@#\", \"$\"], dtype=dtype) with clib.Session() as lib:", "{}\\t{}\\n\".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice(): \"\"\" Test", "should handle multiple figures existing at the same time. \"\"\"", "with pytest.raises(GMTInvalidInput): lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): \"\"\"", "in data.T] ) expected = \"{}\\n\".format(bounds) assert output == expected", "range and inc instead of dim. \"\"\" with clib.Session() as", "session1.destroy() session2.destroy() # Create and destroy a session twice ses", "call_module works. \"\"\" data_fname = os.path.join(TEST_DATA_DIR, \"points.txt\") out_fname = \"test_call_module.txt\"", "\"\"\" Pass array-like vectors to a dataset. \"\"\" size =", "y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] - y[0])]) def test_dataarray_to_matrix_dims_fails():", "function to make it always return a given value. Used", "for the C API. \"\"\" import os from contextlib import", "mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument \"\"\" A", "13 for dtype in dtypes: data = pd.DataFrame( data=dict( x=np.arange(size,", "# It's hard to make the C API function fail", "if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument \"\"\"", "for dtype in dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape)", "and pass it along to gmt info vfargs = (family,", "closing of the # virtual file. with clib.Session() as lib,", "return 0 ses = clib.Session() ses.create(\"test-session\") with mock(ses, \"GMT_Get_Default\", mock_func=mock_defaults):", "clib.Session() session1.create(name=\"test_session1\") assert session1.session_pointer is not None session2 = clib.Session()", "it # Use in a different session to avoid any", "= lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): \"\"\"", "for non-zero return codes. 
\"\"\" # It's hard to make", "\"\"\" Check is the GMT error message was captured. \"\"\"", "old version. \"\"\" if name == b\"API_VERSION\": value.value = b\"5.4.3\"", "\"\"\" Create a grid ignoring range and inc. \"\"\" with", "badly. \"\"\" with clib.Session() as lib: # Dataset from vectors", "# Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as", "None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with", "1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) #", "exceptions for invalid input and output. \"\"\" # Passing in", "valid_modifiers=VIAS) # Should also fail if not given valid modifiers", "GMT will just write to stdout and spaces are valid", "lib.write_data( \"GMT_IS_VECTOR\", \"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1] * 6, \"some-file-name\", None, )", "output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for i", "1, dtype=dtype), z=np.arange(size * 2, size * 3, 1, dtype=dtype),", "out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == \"11.5309", "= \"{}\\n\".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size(): \"\"\" Test", "\"ghij\", \"klmno\"], dtype=dtype) strings2 = np.array([\"pqrst\", \"uvwx\", \"yz!\", \"@#\", \"$\"],", "def test_create_session_fails(): \"\"\" Check that an exception is raised when", "function fails for arrays of different sizes. \"\"\" x =", "clib.Session() with mock(lib, \"GMT_Get_Default\", mock_func=mock_defaults): with pytest.raises(GMTVersionError): with lib: assert", "API function to make it always return a given value.", "code\" in str(error) assert \"gmtinfo [ERROR]: Cannot find file bogus-data.bla\"", "def test_virtualfile_from_vectors_pandas(): \"\"\" Pass vectors to a dataset using pandas", "# Make two figures before calling extract_region to make sure", "it fails for variable increments. \"\"\" data = np.ones((4, 5),", "data = np.diag(v=np.arange(3)) x = np.linspace(start=4, stop=0, num=3) y =", "from getting all of the # properties. with clib.Session() as", "lib: family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\" geometry = \"GMT_IS_POINT\" dataset = lib.create_data(", "return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, \"get_libgmt_func\", mock_get_libgmt_func) yield", "to create a session. 
\"\"\" ses = clib.Session() with mock(ses,", "yield setattr(session, \"get_libgmt_func\", get_libgmt_func) def test_getitem(): \"\"\" Test that I", "Create an instance of Session without \"with\" so no session", "fail if trying to create a session before destroying the", "lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected", "expected def test_extract_region_fails(): \"\"\" Check that extract region fails if", "\"data\") with clib.Session() as _lib: gmt_version = Version(_lib.info[\"version\"]) @contextmanager def", "clib.Session() as lib: family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\" geometry = \"GMT_IS_POINT\" dataset", "shape[1], dtype=dtype).reshape(shape) rows = 5 cols = 3 data =", "first figure and extract the region from it # Use", "ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): \"\"\"", "mock(lib, \"GMT_Write_Data\", returns=1): with pytest.raises(GMTCLibError): lib.write_data( \"GMT_IS_VECTOR\", \"GMT_IS_POINT\", \"GMT_WRITE_SET\", [1]", "mock( lib, \"GMT_Close_VirtualFile\", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass", "from contextlib import contextmanager import numpy as np import numpy.testing", "NULL pointer as output or non-zero status codes. Needed because", "10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y", "with pytest.raises(GMTCLibError): ses[\"A_WHOLE_LOT_OF_JUNK\"] # pylint: disable=pointless-statement def test_create_destroy_session(): \"\"\" Test", "is not None session2 = clib.Session() session2.create(name=\"test_session2\") assert session2.session_pointer is", "for invalid input and output. \"\"\" # Passing in invalid", "pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print(\"This should have failed\") def test_virtualfile_from_matrix():", "grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) matrix, region, inc", "z), (\"y\", y), (\"x\", x)]) with pytest.raises(GMTInvalidInput): dataarray_to_matrix(grid) def test_dataarray_to_matrix_inc_fails():", "# columns, rows, layers, dtype ) data = np.arange(shape[0] *", "passing in one column with string or object dtype into", "ses[\"GMT_MODULE_CMD\"] != -99999 assert ses[\"GMT_PAD_DEFAULT\"] != -99999 assert ses[\"GMT_DOUBLE\"] !=", "= outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for col in", "fails when given bad input. \"\"\" lib = clib.Session() test_cases", "bad input. 
\"\"\" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create(\"test-session\")", "data_matrix = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_MATRIX\", geometry=\"GMT_IS_POINT\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0],", "name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session,", "data = np.ones((4, 5), dtype=\"float64\") x = np.linspace(0, 1, 5)", "-99999 with pytest.raises(GMTCLibError): ses[\"A_WHOLE_LOT_OF_JUNK\"] # pylint: disable=pointless-statement def test_create_destroy_session(): \"\"\"", "] ) expected = \"<vector memory>: N = {}\\t{}\\n\".format(size, bounds)", "gmt_version = Version(_lib.info[\"version\"]) @contextmanager def mock(session, func, returns=None, mock_func=None): \"\"\"", "(\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=data) npt.assert_allclose(actual=region,", "lib.extract_region() def test_extract_region_two_figures(): \"\"\" Extract region should handle multiple figures", "assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer #", "dtype=np.int32) strings1 = np.array([\"a\", \"bc\", \"def\", \"ghij\", \"klmno\"], dtype=dtype) strings2", "with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def test_info_dict(): \"\"\" Make sure the clib.Session.info", "Put 'bla' in the value buffer. \"\"\" value.value = b\"bla\"", "desired=[x[1] - x[0], y[1] - y[0]]) def test_dataarray_to_matrix_negative_x_increment(): \"\"\" Check", "ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create(\"test-session\") with mock(ses, \"GMT_Destroy_Session\",", "with clib.Session() as lib: assert lib.info # Mock GMT_Get_Default to", "import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import", "from the C lib. \"\"\" ses = clib.Session() assert ses[\"GMT_SESSION_EXTERNAL\"]", "because if # output=='', GMT will just write to stdout", "outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for col in data.T]", "with mock(lib, \"GMT_Get_Default\", mock_func=mock_defaults): with pytest.raises(GMTVersionError): with lib: assert lib.info[\"version\"]", "import os from contextlib import contextmanager import numpy as np", "dataarray_to_matrix(grid) def test_get_default(): \"\"\" Make sure get_default works without crashing", "If the data pointer returned is None (NULL pointer) with", "dtype in dtypes: with clib.Session() as lib: family = \"GMT_IS_DATASET|GMT_VIA_MATRIX\"", "test_parse_constant_composite(): \"\"\" Parsing a composite constant argument (separated by |)", "- y[0])]) def test_dataarray_to_matrix_negative_x_and_y_increment(): \"\"\" Check that dataarray_to_matrix returns correct", "data via a virtual file with a Dataset. \"\"\" dtypes", "geometry=\"GMT_IS_SURFACE\", mode=\"GMT_CONTAINER_ONLY\", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): \"\"\"", "0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with", "= list(range(0, size, 1)) y = tuple(range(size, size * 2,", "disable=pointless-statement ses.create(\"session1\") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError):", "string or object dtype into virtual file dataset. 
\"\"\" size", "output = outfile.read(keep_tabs=True) bounds = \"\\t\".join( [\"{:.0f}\\t{:.0f}\".format(col.min(), col.max()) for col", "test_parse_constant_fails(): \"\"\" Check if the function fails when given bad", "x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2,", "desired=[x.min(), x.max(), y.min(), y.max()]) npt.assert_allclose(actual=inc, desired=[abs(x[1] - x[0]), abs(y[1] -", "- y[0]]) def test_dataarray_to_matrix_negative_x_increment(): \"\"\" Check if dataarray_to_matrix returns correct", "np.arange(12) z = np.arange(10) grid = xr.DataArray(data, coords=[(\"z\", z), (\"y\",", "stop=5, num=3) grid = xr.DataArray(data, coords=[(\"y\", y), (\"x\", x)]) matrix,", "output == expected def test_virtualfile_from_vectors_diff_size(): \"\"\" Test the function fails", "# Mock Open_VirtualFile to test the status check when entering", "as lib: # Dataset from vectors data_vector = lib.create_data( family=\"GMT_IS_DATASET|GMT_VIA_VECTOR\",", "with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module(\"info\", \"{}", "= dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=np.flip(data, axis=(0, 1))) npt.assert_allclose(actual=region, desired=[x.min(), x.max(), y.min(),", "x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix, desired=data) npt.assert_allclose(actual=region, desired=[x.min(),", "assert output == expected def test_virtualfile_from_vectors_arraylike(): \"\"\" Pass array-like vectors", "figure. fig1 = Figure() region1 = np.array([0, 10, -20, -10])", "this part of the code. with clib.Session() as lib: with", "\"GMT_IS_DATASET|GMT_VIA_MATRIX\", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): \"\"\" Run the function", "data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib:", "the function to make sure it doesn't fail badly. \"\"\"", "12, 11), dtype=\"float32\") x = np.arange(11) y = np.arange(12) z", "with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.get_default(\"NOT_A_VALID_NAME\") def test_info_dict(): \"\"\"", "a grid specifying range and inc instead of dim. \"\"\"", "is using them anyway. # This should work... lib._parse_constant( \"GMT_IS_DATASET|GMT_VIA_MATRIX\",", "make the C API function fail without causing a Segmentation", "Open_VirtualFile to test the status check when entering the context.", "coords=[(\"y\", y), (\"x\", x)]) matrix, region, inc = dataarray_to_matrix(grid) npt.assert_allclose(actual=matrix,", "argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print(\"This should have", "1, dtype=dtype), ) ) with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x,", "handle multiple figures existing at the same time. \"\"\" #", "[\"<{:.0f}/{:.0f}>\".format(min(i), max(i)) for i in (x, y, z)] ) expected" ]
"""Type stubs for _pytest._code."""

# This class actually has more functions than are specified here.
# We don't use these features, so I don't think it's worth including
# them in our type stub. We can always change it later.
class ExceptionInfo:
    @property
    def value(self) -> Exception: ...
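The stub declares only the value property. Its usual consumer is pytest.raises, which yields an ExceptionInfo; the short test below is an illustrative sketch (it assumes pytest is installed and is not part of the stub itself).

# Illustrative use of ExceptionInfo.value with pytest.raises.
import pytest


def test_divide_by_zero() -> None:
    with pytest.raises(ZeroDivisionError) as excinfo:  # excinfo: ExceptionInfo
        1 / 0  # pylint: disable=pointless-statement
    # .value holds the caught exception instance, typed as Exception in the stub.
    assert isinstance(excinfo.value, ZeroDivisionError)
    assert "division by zero" in str(excinfo.value)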
def get_primes(n):
    primes = []  # stores the prime numbers within the range of the number
    sieve = [False] * (n + 1)  # boolean flags; True marks a number as not prime
    sieve[0] = sieve[1] = True  # marking 0 and 1 as not prime
    for i in range(2, n + 1):  # loops over all the numbers to check for prime numbers
        if sieve[i]:  # checks whether a number is not prime
            continue  # skips the iteration if the number is not a prime number
        primes.append(i)  # adds a number into the list if it is a prime number
        # loops over all multiples of the prime number, starting from the
        # square of the prime number
        for j in range(i ** 2, n + 1, i):
            sieve[j] = True  # marks the multiple of the prime number as not prime
    return primes  # returns the list containing prime numbers


def get_factorization(n):
    prime_factors = []  # stores the prime factorization of the number
    for prime in get_primes(n):  # looping over all the prime numbers
        # keeps dividing the number by a certain prime number until the number is 1
        while n != 1:
            if n % prime == 0:  # checks if the number is divisible by a particular prime number
                prime_factors.append(prime)  # adds the prime factor to the list if it divides the number
                n //= prime  # reduces the number after dividing it by the prime number
            else:
                # if the number is not divisible by the particular prime number,
                # the inner loop breaks and the number is further divided by the
                # next prime number until the number becomes 1
                break
    return prime_factors  # returns the list containing the prime factorization of the number


if __name__ == "__main__":
    n = int(input("Enter a number: "))
    print(get_factorization(n))
the number", "checks whether a number is not prime continue # skips", "into list if it is a prime number for j", "prime number starting from the sqaure of the prime number", "get_primes(n): # looping over all the prime numbers while n", "if __name__ == \"__main__\": n = int(input(\"Enter a number: \"))", "not prime for i in range(2, n + 1): #", "# if the number is not divisible by the paricular", "a number is prime or not sieve[0] = sieve[1] =", "numbers if sieve[i]: # checks whether a number is not", "stores boolean values indicating whether a number is prime or", "of the prime number sieve[j] = True # marks the", "prime in get_primes(n): # looping over all the prime numbers", "the reange of the number sieve = [False] * (n", "factorization of the number for prime in get_primes(n): # looping", "# stores the prime numbers within the reange of the", "# looping over all the prime numbers while n !=", "of the number if __name__ == \"__main__\": n = int(input(\"Enter", "reducing the number after dividing it by the prime number", "return prime_factors # returns the list containing the prime factorization", "while n != 1: # keeps diving the number by", "all the prime numbers while n != 1: # keeps", "number primes.append(i) # adds a number into list if it", "numbers to check for prime numbers if sieve[i]: # checks", "list if it is a prime number for j in", "number else: break # if the number is not divisible", "prime numbers if sieve[i]: # checks whether a number is", "# skips the loop if the number is not a", "sqaure of the prime number sieve[j] = True # marks", "the number for prime in get_primes(n): # looping over all", "sieve[i]: # checks whether a number is not prime continue", "prime numbers within the reange of the number sieve =", "the loop if the number is not a prime number", "containing prime numbers def get_factorization(n): prime_factors = [] # stores", "and 1 as not prime for i in range(2, n", "diving the number by a certain prime number until the", "== 0: # checks if the number is divisible by", "else: break # if the number is not divisible by", "[False] * (n + 1) # stores boolean values indicating", "divisible by a particular prime number prime_factors.append(prime) # add the", "from the sqaure of the prime number sieve[j] = True", "returns the list containing the prime factorization of the number", "by the paricular prime number then the inner loop breaks", "# marking 0 and 1 as not prime for i", "continue # skips the loop if the number is not", "prime_factors # returns the list containing the prime factorization of", "prime factorization of the number if __name__ == \"__main__\": n", "True # marking 0 and 1 as not prime for", "marking 0 and 1 as not prime for i in", "n + 1): # loops over all the numbers to", "/= prime # reducing the number after dividing it by", "dividing it by the prime number else: break # if", "is prime or not sieve[0] = sieve[1] = True #", "not prime continue # skips the loop if the number", "number sieve = [False] * (n + 1) # stores", "as not prime return primes # returns the list containing", "is 1 if n % prime == 0: # checks", "by a certain prime number until the number is 1", "the paricular prime number then the inner loop breaks and", "# adds a number into list if it is a", "if n % prime == 0: # checks if the", "inner loop breaks and the number is further divided by", "becomes 1 return prime_factors # returns the list containing the", "for prime numbers if sieve[i]: # checks whether a number", "or not sieve[0] = sieve[1] = True # marking 0", "in range(2, 
n + 1): # loops over all the", "if it divides the number n /= prime # reducing", "the number is divisible by a particular prime number prime_factors.append(prime)", "whether a number is not prime continue # skips the", "is not a prime number primes.append(i) # adds a number", "stores the prime numbers within the reange of the number", "skips the loop if the number is not a prime", "the number sieve = [False] * (n + 1) #", "for i in range(2, n + 1): # loops over", "then the inner loop breaks and the number is further", "1 return prime_factors # returns the list containing the prime", "the prime factorization of the number for prime in get_primes(n):", "the list if it divides the number n /= prime", "a prime number for j in range(i ** 2, n", "starting from the sqaure of the prime number sieve[j] =", "returns the list containing prime numbers def get_factorization(n): prime_factors =", "check for prime numbers if sieve[i]: # checks whether a", "of the prime number as not prime return primes #", "number into list if it is a prime number for", "the number if __name__ == \"__main__\": n = int(input(\"Enter a", "the prime factor in the list if it divides the", "prime numbers def get_factorization(n): prime_factors = [] # stores the", "not divisible by the paricular prime number then the inner", "of the number sieve = [False] * (n + 1)", "1) # stores boolean values indicating whether a number is", "prime for i in range(2, n + 1): # loops", "loops over all the numbers to check for prime numbers", "after dividing it by the prime number else: break #", "number by a certain prime number until the number is", "number is not divisible by the paricular prime number then", "is not prime continue # skips the loop if the", "keeps diving the number by a certain prime number until", "of the prime number starting from the sqaure of the", "prime number until the number is 1 if n %", "if the number is not a prime number primes.append(i) #", "the number is not a prime number primes.append(i) # adds", "the prime number sieve[j] = True # marks the multiple" ]
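# Sanity check for the two helpers above -- an illustrative addition, not part
# of the original script. It assumes both functions are defined as written:
# the product of the returned factors must reconstruct the input, and every
# returned factor must itself be prime.
from functools import reduce
from operator import mul


def check_factorization(n):
    factors = get_factorization(n)
    assert reduce(mul, factors, 1) == n, f"factors {factors} do not multiply to {n}"
    primes = set(get_primes(n))
    assert all(f in primes for f in factors), f"non-prime factor in {factors}"


for value in (2, 12, 97, 360):
    check_factorization(value)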
# --------------------------------------------------------------------
# pandas RangeIndex implementation (pandas/core/indexes/range.py)
# --------------------------------------------------------------------

from __future__ import annotations

from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Hashable,
    List,
    cast,
)
import warnings

import numpy as np

from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Dtype
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
    cache_readonly,
    doc,
)
from pandas.util._exceptions import rewrite_exception

from pandas.core.dtypes.common import (
    ensure_platform_int,
    ensure_python_int,
    is_float,
    is_integer,
    is_scalar,
    is_signed_integer_dtype,
    is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex

from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
    Float64Index,
    Int64Index,
    NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer

if TYPE_CHECKING:
    from pandas import Index

_empty_range = range(0)


class RangeIndex(NumericIndex):
    """
    Immutable Index implementing a monotonic integer range.

    RangeIndex is a memory-saving special case of Int64Index limited to
    representing monotonic ranges. Using RangeIndex may in some instances
    improve computing speed.

    This is the default index type used by DataFrame and Series when no
    explicit index is provided by the user.

    Parameters
    ----------
    start : int (default: 0), range, or other RangeIndex instance
        If int and "stop" is not given, interpreted as "stop" instead.
    stop : int (default: 0)
    step : int (default: 1)
    dtype : np.int64
        Unused, accepted for homogeneity with other index types.
    copy : bool, default False
        Unused, accepted for homogeneity with other index types.
    name : object, optional
        Name to be stored in the index.

    Attributes
    ----------
    start
    stop
    step

    Methods
    -------
    from_range

    See Also
    --------
    Index : The base pandas Index type.
    Int64Index : Index of int64 data.
    """

    _typ = "rangeindex"
    _engine_type = libindex.Int64Engine
    _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
    _can_hold_na = False
    _range: range

    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        start=None,
        stop=None,
        step=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable = None,
    ) -> RangeIndex:
        cls._validate_dtype(dtype)
        name = maybe_extract_name(name, start, cls)

        # RangeIndex
        if isinstance(start, RangeIndex):
            return start.copy(name=name)
        elif isinstance(start, range):
            return cls._simple_new(start, name=name)

        # validate the arguments
        if com.all_none(start, stop, step):
            raise TypeError("RangeIndex(...) must be called with integers")

        start = ensure_python_int(start) if start is not None else 0

        if stop is None:
            start, stop = 0, start
        else:
            stop = ensure_python_int(stop)

        step = ensure_python_int(step) if step is not None else 1
        if step == 0:
            raise ValueError("Step must not be zero")

        rng = range(start, stop, step)
        return cls._simple_new(rng, name=name)

    @classmethod
    def from_range(
        cls, data: range, name=None, dtype: Dtype | None = None
    ) -> RangeIndex:
        """
        Create RangeIndex from a range object.

        Returns
        -------
        RangeIndex
        """
        if not isinstance(data, range):
            raise TypeError(
                f"{cls.__name__}(...) must be called with object coercible to a "
                f"range, {repr(data)} was passed"
            )
        cls._validate_dtype(dtype)
        return cls._simple_new(data, name=name)

    @classmethod
    def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
        result = object.__new__(cls)

        assert isinstance(values, range)

        result._range = values
        result._name = name
        result._cache = {}
        result._reset_identity()
        return result
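# Illustrative usage sketch (not part of the pandas source): the three
# constructor paths above, exercised through the public API. Assumes a
# pandas installation where RangeIndex is exported at the top level.
import pandas as pd

idx = pd.RangeIndex(5)  # int with no "stop": interpreted as stop
assert list(idx) == [0, 1, 2, 3, 4]

idx2 = pd.RangeIndex.from_range(range(2, 10, 2))  # wraps an existing range
assert list(idx2) == [2, 4, 6, 8]

idx3 = pd.RangeIndex(idx, name="copy")  # a RangeIndex argument is copied
assert idx3.equals(idx) and idx3.name == "copy"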
    # --------------------------------------------------------------------

    @cache_readonly
    def _constructor(self) -> type[Int64Index]:
        """return the class to use for construction"""
        return Int64Index

    @cache_readonly
    def _data(self) -> np.ndarray:
        """
        An int array that for performance reasons is created only when
        needed. The constructed array is saved in ``_cache``.
        """
        return np.arange(self.start, self.stop, self.step, dtype=np.int64)

    @cache_readonly
    def _cached_int64index(self) -> Int64Index:
        return Int64Index._simple_new(self._data, name=self.name)

    @property
    def _int64index(self) -> Int64Index:
        # wrap _cached_int64index so we can be sure its name matches self.name
        res = self._cached_int64index
        res._name = self._name
        return res

    def _get_data_as_items(self):
        """return a list of tuples of start, stop, step"""
        rng = self._range
        return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]

    def __reduce__(self):
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (type(self), d), None

    # --------------------------------------------------------------------
    # Rendering Methods

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(("name", ibase.default_pprint(self.name)))
        return attrs

    def _format_data(self, name=None):
        # we are formatting thru the attributes
        return None

    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
        if not len(self._range):
            return header
        first_val_str = str(self._range[0])
        last_val_str = str(self._range[-1])
        max_length = max(len(first_val_str), len(last_val_str))

        return header + [f"{x:<{max_length}}" for x in self._range]

    # --------------------------------------------------------------------

    _deprecation_message = (
        "RangeIndex.{} is deprecated and will be "
        "removed in a future version. Use RangeIndex.{} "
        "instead"
    )

    @property
    def start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).
        """
        # GH 25710
        return self._range.start

    @property
    def _start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``start`` instead.
        """
        warnings.warn(
            self._deprecation_message.format("_start", "start"),
            FutureWarning,
            stacklevel=2,
        )
        return self.start

    @property
    def stop(self) -> int:
        """
        The value of the `stop` parameter.
        """
        return self._range.stop

    @property
    def _stop(self) -> int:
        """
        The value of the `stop` parameter.

        .. deprecated:: 0.25.0
            Use ``stop`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_stop", "stop"),
            FutureWarning,
            stacklevel=2,
        )
        return self.stop

    @property
    def step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).
        """
        # GH 25710
        return self._range.step

    @property
    def _step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``step`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_step", "step"),
            FutureWarning,
            stacklevel=2,
        )
        return self.step

    @cache_readonly
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        rng = self._range
        return getsizeof(rng) + sum(
            getsizeof(getattr(rng, attr_name))
            for attr_name in ["start", "stop", "step"]
        )

    def memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        return self.nbytes

    @property
    def dtype(self) -> np.dtype:
        return np.dtype(np.int64)

    @property
    def is_unique(self) -> bool:
        """return if the index has unique values"""
        return True

    @cache_readonly
    def is_monotonic_increasing(self) -> bool:
        return self._range.step > 0 or len(self) <= 1

    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        return self._range.step < 0 or len(self) <= 1

    def __contains__(self, key: Any) -> bool:
        hash(key)
        try:
            key = ensure_python_int(key)
        except TypeError:
            return False
        return key in self._range

    @property
    def inferred_type(self) -> str:
        return "integer"
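# Illustrative sketch (not part of the pandas source): because only
# start/stop/step are stored, nbytes is independent of length, while a
# materialized integer index grows linearly with the number of elements.
import numpy as np
import pandas as pd

small = pd.RangeIndex(10)
large = pd.RangeIndex(10_000_000)
assert small.nbytes == large.nbytes  # both store just three integers

materialized = pd.Index(np.arange(10_000_000, dtype=np.int64))
assert materialized.nbytes > large.nbytes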
    # --------------------------------------------------------------------
    # Indexing Methods

    @doc(Int64Index.get_loc)
    def get_loc(self, key, method=None, tolerance=None):
        if method is None and tolerance is None:
            if is_integer(key) or (is_float(key) and key.is_integer()):
                new_key = int(key)
                try:
                    return self._range.index(new_key)
                except ValueError as err:
                    raise KeyError(key) from err
            raise KeyError(key)
        return super().get_loc(key, method=method, tolerance=tolerance)

    def _get_indexer(
        self,
        target: Index,
        method: str | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> np.ndarray:
        # -> np.ndarray[np.intp]
        if com.any_not_none(method, tolerance, limit):
            return super()._get_indexer(
                target, method=method, tolerance=tolerance, limit=limit
            )

        if self.step > 0:
            start, stop, step = self.start, self.stop, self.step
        else:
            # GH 28678: work on reversed range for simplicity
            reverse = self._range[::-1]
            start, stop, step = reverse.start, reverse.stop, reverse.step

        if not is_signed_integer_dtype(target):
            # checks/conversions/roundings are delegated to general method
            return super()._get_indexer(target, method=method, tolerance=tolerance)

        target_array = np.asarray(target)
        locs = target_array - start
        valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
        locs[~valid] = -1
        locs[valid] = locs[valid] / step

        if step != self.step:
            # We reversed this range: transform to original locs
            locs[valid] = len(self) - 1 - locs[valid]
        return ensure_platform_int(locs)
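# Illustrative sketch (not part of the pandas source): the fast path above
# resolves positions arithmetically, loc = (target - start) // step, and
# maps off-grid or out-of-bounds targets to -1.
import pandas as pd

idx = pd.RangeIndex(0, 20, 2)  # 0, 2, 4, ..., 18
assert idx.get_loc(4) == 2  # exact hit: (4 - 0) // 2

# get_indexer vectorizes the same arithmetic; 5 is not on the grid
assert list(idx.get_indexer([4, 5, 18])) == [2, -1, 9]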
    # --------------------------------------------------------------------

    def repeat(self, repeats, axis=None) -> Int64Index:
        return self._int64index.repeat(repeats, axis=axis)

    def delete(self, loc) -> Int64Index:  # type: ignore[override]
        return self._int64index.delete(loc)

    def take(
        self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
    ) -> Int64Index:
        with rewrite_exception("Int64Index", type(self).__name__):
            return self._int64index.take(
                indices,
                axis=axis,
                allow_fill=allow_fill,
                fill_value=fill_value,
                **kwargs,
            )

    def tolist(self) -> list[int]:
        return list(self._range)

    @doc(Int64Index.__iter__)
    def __iter__(self):
        yield from self._range

    @doc(Int64Index._shallow_copy)
    def _shallow_copy(self, values, name: Hashable = no_default):
        name = self.name if name is no_default else name

        if values.dtype.kind == "f":
            return Float64Index(values, name=name)
        return Int64Index._simple_new(values, name=name)

    def _view(self: RangeIndex) -> RangeIndex:
        result = type(self)._simple_new(self._range, name=self._name)
        result._cache = self._cache
        return result

    @doc(Int64Index.copy)
    def copy(
        self,
        name: Hashable = None,
        deep: bool = False,
        dtype: Dtype | None = None,
        names=None,
    ):
        name = self._validate_names(name=name, names=names, deep=deep)[0]
        new_index = self._rename(name=name)

        if dtype:
            warnings.warn(
                "parameter dtype is deprecated and will be removed in a future "
                "version. Use the astype method instead.",
                FutureWarning,
                stacklevel=2,
            )
            new_index = new_index.astype(dtype)
        return new_index

    def _minmax(self, meth: str):
        no_steps = len(self) - 1
        if no_steps == -1:
            return np.nan
        elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
            return self.start

        return self.start + self.step * no_steps

    def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The minimum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_min(args, kwargs)
        return self._minmax("min")

    def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The maximum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_max(args, kwargs)
        return self._minmax("max")

    def argsort(self, *args, **kwargs) -> np.ndarray:
        """
        Returns the indices that would sort the index and its
        underlying data.

        Returns
        -------
        np.ndarray[np.intp]

        See Also
        --------
        numpy.ndarray.argsort
        """
        ascending = kwargs.pop("ascending", True)  # EA compat
        nv.validate_argsort(args, kwargs)

        if self._range.step > 0:
            result = np.arange(len(self), dtype=np.intp)
        else:
            result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)

        if not ascending:
            result = result[::-1]
        return result

    def factorize(
        self, sort: bool = False, na_sentinel: int | None = -1
    ) -> tuple[np.ndarray, RangeIndex]:
        codes = np.arange(len(self), dtype=np.intp)
        uniques = self
        if sort and self.step < 0:
            codes = codes[::-1]
            uniques = uniques[::-1]
        return codes, uniques

    def equals(self, other: object) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if isinstance(other, RangeIndex):
            return self._range == other._range
        return super().equals(other)
    # --------------------------------------------------------------------
    # Set Operations

    def _intersection(self, other: Index, sort=False):
        if not isinstance(other, RangeIndex):
            # Int64Index
            return super()._intersection(other, sort=sort)

        if not len(self) or not len(other):
            return self._simple_new(_empty_range)

        first = self._range[::-1] if self.step < 0 else self._range
        second = other._range[::-1] if other.step < 0 else other._range

        # check whether intervals intersect
        # deals with in- and decreasing ranges
        int_low = max(first.start, second.start)
        int_high = min(first.stop, second.stop)
        if int_high <= int_low:
            return self._simple_new(_empty_range)

        # Method hint: linear Diophantine equation
        # solve intersection problem
        # performance hint: for identical step sizes, could use
        # cheaper alternative
        gcd, s, _ = self._extended_gcd(first.step, second.step)

        # check whether element sets intersect
        if (first.start - second.start) % gcd:
            return self._simple_new(_empty_range)

        # calculate parameters for the RangeIndex describing the
        # intersection disregarding the lower bounds
        tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
        new_step = first.step * second.step // gcd
        new_range = range(tmp_start, int_high, new_step)
        new_index = self._simple_new(new_range)

        # adjust index to limiting interval
        new_start = new_index._min_fitting_element(int_low)
        new_range = range(new_start, new_index.stop, new_index.step)
        new_index = self._simple_new(new_range)

        if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
            new_index = new_index[::-1]
        if sort is None:
            new_index = new_index.sort_values()

        return new_index

    def _min_fitting_element(self, lower_limit: int) -> int:
        """Returns the smallest element greater than or equal to the limit"""
        no_steps = -(-(lower_limit - self.start) // abs(self.step))
        return self.start + abs(self.step) * no_steps

    def _max_fitting_element(self, upper_limit: int) -> int:
        """Returns the largest element smaller than or equal to the limit"""
        no_steps = (upper_limit - self.start) // abs(self.step)
        return self.start + abs(self.step) * no_steps

    def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
        """
        Extended Euclidean algorithms to solve Bezout's identity:
           a*x + b*y = gcd(x, y)
        Finds one particular solution for x, y: s, t
        Returns: gcd, s, t
        """
        s, old_s = 0, 1
        t, old_t = 1, 0
        r, old_r = b, a
        while r:
            quotient = old_r // r
            old_r, r = r, old_r - quotient * r
            old_s, s = s, old_s - quotient * s
            old_t, t = t, old_t - quotient * t
        return old_r, old_s, old_t

    def _union(self, other: Index, sort):
        """
        Form the union of two Index objects and sorts if possible

        Parameters
        ----------
        other : Index or array-like

        sort : False or None, default None
            Whether to sort resulting index. ``sort=None`` returns a
            monotonically increasing ``RangeIndex`` if possible or a sorted
            ``Int64Index`` if not. ``sort=False`` always returns an
            unsorted ``Int64Index``

            .. versionadded:: 0.25.0

        Returns
        -------
        union : Index
        """
        if isinstance(other, RangeIndex) and sort is None:
            start_s, step_s = self.start, self.step
            end_s = self.start + self.step * (len(self) - 1)
            start_o, step_o = other.start, other.step
            end_o = other.start + other.step * (len(other) - 1)
            if self.step < 0:
                start_s, step_s, end_s = end_s, -step_s, start_s
            if other.step < 0:
                start_o, step_o, end_o = end_o, -step_o, start_o
            if len(self) == 1 and len(other) == 1:
                step_s = step_o = abs(self.start - other.start)
            elif len(self) == 1:
                step_s = step_o
            elif len(other) == 1:
                step_o = step_s
            start_r = min(start_s, start_o)
            end_r = max(end_s, end_o)
            if step_o == step_s:
                if (
                    (start_s - start_o) % step_s == 0
                    and (start_s - end_o) <= step_s
                    and (start_o - end_s) <= step_s
                ):
                    return type(self)(start_r, end_r + step_s, step_s)
                if (
                    (step_s % 2 == 0)
                    and (abs(start_s - start_o) <= step_s / 2)
                    and (abs(end_s - end_o) <= step_s / 2)
                ):
                    return type(self)(start_r, end_r + step_s / 2, step_s / 2)
            elif step_o % step_s == 0:
                if (
                    (start_o - start_s) % step_s == 0
                    and (start_o + step_s >= start_s)
                    and (end_o - step_s <= end_s)
                ):
                    return type(self)(start_r, end_r + step_s, step_s)
            elif step_s % step_o == 0:
                if (
                    (start_s - start_o) % step_o == 0
                    and (start_s + step_o >= start_o)
                    and (end_s - step_o <= end_o)
                ):
                    return type(self)(start_r, end_r + step_o, step_o)
        return self._int64index._union(other, sort=sort)
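# Standalone sketch of the Diophantine machinery used by _intersection above
# (illustrative helper names, plain Python): two arithmetic progressions
# share elements iff gcd(step1, step2) divides the offset between their
# starts.
def extended_gcd(a, b):
    # returns (g, x, y) with a*x + b*y == g == gcd(a, b)
    s, old_s, t, old_t, r, old_r = 0, 1, 1, 0, b, a
    while r:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_s, s = s, old_s - q * s
        old_t, t = t, old_t - q * t
    return old_r, old_s, old_t


def intersect_ranges(first, second):
    # mirrors RangeIndex._intersection for increasing ranges
    low, high = max(first.start, second.start), min(first.stop, second.stop)
    if high <= low:
        return range(0)
    g, s, _ = extended_gcd(first.step, second.step)
    if (first.start - second.start) % g:
        return range(0)  # the progressions never meet
    tmp_start = first.start + (second.start - first.start) * first.step // g * s
    new_step = first.step * second.step // g
    # shift the start up to the first element >= low
    no_steps = -(-(low - tmp_start) // new_step)
    return range(tmp_start + new_step * max(no_steps, 0), high, new_step)


assert list(intersect_ranges(range(0, 30, 4), range(6, 30, 6))) == [12, 24]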
    def _difference(self, other, sort=None):
        # optimized set operation if we have another RangeIndex
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name = self._convert_can_do_setop(other)

        if not isinstance(other, RangeIndex):
            return super()._difference(other, sort=sort)

        res_name = ops.get_op_result_name(self, other)

        first = self._range[::-1] if self.step < 0 else self._range
        overlap = self.intersection(other)
        if overlap.step < 0:
            overlap = overlap[::-1]

        if len(overlap) == 0:
            return self.rename(name=res_name)
        if len(overlap) == len(self):
            return self[:0].rename(res_name)
        if not isinstance(overlap, RangeIndex):
            # We won't end up with RangeIndex, so fall back
            return super()._difference(other, sort=sort)
        if overlap.step != first.step:
            # In some cases we might be able to get a RangeIndex back,
            # but not worth the effort.
            return super()._difference(other, sort=sort)

        if overlap[0] == first.start:
            # The difference is everything after the intersection
            new_rng = range(overlap[-1] + first.step, first.stop, first.step)
        elif overlap[-1] == first[-1]:
            # The difference is everything before the intersection
            new_rng = range(first.start, overlap[0], first.step)
        else:
            # The difference is not range-like
            return super()._difference(other, sort=sort)

        new_index = type(self)._simple_new(new_rng, name=res_name)
        if first is not self._range:
            new_index = new_index[::-1]
        return new_index

    def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
        if not isinstance(other, RangeIndex) or sort is not None:
            return super().symmetric_difference(other, result_name, sort)

        left = self.difference(other)
        right = other.difference(self)
        result = left.union(right)

        if result_name is not None:
            result = result.rename(result_name)
        return result

    # --------------------------------------------------------------------

    def _concat(self, indexes: list[Index], name: Hashable) -> Index:
        """
        Overriding parent method for the case of all RangeIndex instances.

        When all members of "indexes" are of type RangeIndex: result will be
        RangeIndex if possible, Int64Index otherwise. E.g.:
        indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
        indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
        """
        if not all(isinstance(x, RangeIndex) for x in indexes):
            return super()._concat(indexes, name)

        elif len(indexes) == 1:
            return indexes[0]

        rng_indexes = cast(List[RangeIndex], indexes)

        start = step = next_ = None

        # Filter the empty indexes
        non_empty_indexes = [obj for obj in rng_indexes if len(obj)]

        for obj in non_empty_indexes:
            rng = obj._range

            if start is None:
                # This is set by the first non-empty index
                start = rng.start
                if step is None and len(rng) > 1:
                    step = rng.step
            elif step is None:
                # First non-empty index had only one element
                if rng.start == start:
                    values = np.concatenate([x._values for x in rng_indexes])
                    result = Int64Index(values)
                    return result.rename(name)

                step = rng.start - start

            non_consecutive = (step != rng.step and len(rng) > 1) or (
                next_ is not None and rng.start != next_
            )
            if non_consecutive:
                result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
                return result.rename(name)

            if step is not None:
                next_ = rng[-1] + step

        if non_empty_indexes:
            # Get the stop value from "next" or alternatively
            # from the last non-empty index
            stop = non_empty_indexes[-1].stop if next_ is None else next_
            return RangeIndex(start, stop, step).rename(name)

        # Here all "indexes" had 0 length, i.e. were empty.
        # In this case return an empty range index.
        return RangeIndex(0, 0).rename(name)
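# Illustrative usage sketch (not part of the pandas source): concatenating
# consecutive RangeIndexes preserves the type, while a gap forces
# materialization, exactly as the _concat docstring above describes.
import pandas as pd

joined = pd.RangeIndex(3).append(pd.RangeIndex(3, 6))
assert isinstance(joined, pd.RangeIndex)
assert list(joined) == [0, 1, 2, 3, 4, 5]

gapped = pd.RangeIndex(3).append(pd.RangeIndex(4, 6))
assert not isinstance(gapped, pd.RangeIndex)  # falls back to an integer index
assert list(gapped) == [0, 1, 2, 4, 5]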
\"\"\" return self._range.stop @property def _stop(self) ->", "return header + [f\"{x:<{max_length}}\" for x in self._range] # --------------------------------------------------------------------", "value of the `start` parameter (``0`` if this was not", "range(overlap[-1] + first.step, first.stop, first.step) elif overlap[-1] == first[-1]: #", "in [rstart, rstop, rstep]): result = result.astype(\"float64\") return result except", "work on reversed range for simplicity reverse = self._range[::-1] start,", "max(len(first_val_str), len(last_val_str)) return header + [f\"{x:<{max_length}}\" for x in self._range]", "the class to use for construction \"\"\" return Int64Index @cache_readonly", "self._range[new_key] except IndexError as err: raise IndexError( f\"index {key} is", "None, deep: bool = False, dtype: Dtype | None =", "return a list of tuples of start, stop, step \"\"\"", "== step_s: if ( (start_s - start_o) % step_s ==", "the arguments if com.all_none(start, stop, step): raise TypeError(\"RangeIndex(...) must be", "result # -------------------------------------------------------------------- def _concat(self, indexes: list[Index], name: Hashable) ->", "-------- numpy.ndarray.nbytes \"\"\" return self.nbytes @property def dtype(self) -> np.dtype:", "\"instead\" ) @property def start(self) -> int: \"\"\" The value", "end up with RangeIndex, so fall back return super()._difference(other, sort=sort)", "import warnings import numpy as np from pandas._libs import index", "no_steps == -1: return np.nan elif (meth == \"min\" and", "in rng_indexes]) result = Int64Index(values) return result.rename(name) step = rng.start", "): return type(self)(start_r, end_r + step_s / 2, step_s /", "index if not is_integer(rstep) or not rstep: raise ValueError else:", "operator.mod, ops.rmod, ops.rfloordiv, divmod, ops.rdivmod, ]: return op(self._int64index, other) step:", "def _max_fitting_element(self, upper_limit: int) -> int: \"\"\"Returns the largest element", "list[Index], name: Hashable) -> Index: \"\"\" Overriding parent method for", "self._range[::-1] if self.step < 0 else self._range second = other._range[::-1]", "Index of int64 data. \"\"\" _typ = \"rangeindex\" _engine_type =", "old_t = 1, 0 r, old_r = b, a while", "overlap[0], first.step) else: # The difference is not range-like return", "FutureWarning, stacklevel=2, ) return self.stop @property def step(self) -> int:", "> 0: start, stop, step = self.start, self.stop, self.step else:", "other == 0 and self.step % other == 0: start", "stop, step = reverse.start, reverse.stop, reverse.step if not is_signed_integer_dtype(target): #", "| None = None, names=None, ): name = self._validate_names(name=name, names=names,", "0): new_index = new_index[::-1] if sort is None: new_index =", "index and its underlying data. 
class RangeIndex(NumericIndex):
    """
    Immutable Index implementing a monotonic integer range.

    RangeIndex is a memory-saving special case of Int64Index limited to
    representing monotonic ranges. Using RangeIndex may in some instances
    improve computing speed.

    This is the default index type used by DataFrame and Series when no
    explicit index is provided by the user.

    Parameters
    ----------
    start : int (default: 0), range, or other RangeIndex instance
        If int and "stop" is not given, interpreted as "stop" instead.
    stop : int (default: 0)
    step : int (default: 1)
    dtype : np.int64
        Unused, accepted for homogeneity with other index types.
    copy : bool, default False
        Unused, accepted for homogeneity with other index types.
    name : object, optional
        Name to be stored in the index.

    Attributes
    ----------
    start
    stop
    step

    Methods
    -------
    from_range

    See Also
    --------
    Index : The base pandas Index type.
    Int64Index : Index of int64 data.
    """

    _typ = "rangeindex"
    _engine_type = libindex.Int64Engine
    _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
    _can_hold_na = False
    _range: range
    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        start=None,
        stop=None,
        step=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable = None,
    ) -> RangeIndex:
        cls._validate_dtype(dtype)
        name = maybe_extract_name(name, start, cls)

        # RangeIndex
        if isinstance(start, RangeIndex):
            return start.copy(name=name)
        elif isinstance(start, range):
            return cls._simple_new(start, name=name)

        # validate the arguments
        if com.all_none(start, stop, step):
            raise TypeError("RangeIndex(...) must be called with integers")

        start = ensure_python_int(start) if start is not None else 0

        if stop is None:
            start, stop = 0, start
        else:
            stop = ensure_python_int(stop)

        step = ensure_python_int(step) if step is not None else 1
        if step == 0:
            raise ValueError("Step must not be zero")

        rng = range(start, stop, step)
        return cls._simple_new(rng, name=name)

    @classmethod
    def from_range(
        cls, data: range, name=None, dtype: Dtype | None = None
    ) -> RangeIndex:
        """
        Create RangeIndex from a range object.

        Returns
        -------
        RangeIndex
        """
        if not isinstance(data, range):
            raise TypeError(
                f"{cls.__name__}(...) must be called with object coercible to a "
                f"range, {repr(data)} was passed"
            )
        cls._validate_dtype(dtype)
        return cls._simple_new(data, name=name)

    @classmethod
    def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
        result = object.__new__(cls)

        assert isinstance(values, range)

        result._range = values
        result._name = name
        result._cache = {}
        result._reset_identity()
        return result
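    # Illustrative sketch (my addition): the constructor paths above in use.
    # ``from_range`` wraps an existing ``range`` object directly; calling the
    # constructor with no arguments at all is rejected by ``__new__``.
    #
    #     >>> import pandas as pd
    #     >>> pd.RangeIndex.from_range(range(2, 10, 2))
    #     RangeIndex(start=2, stop=10, step=2)
    #     >>> pd.RangeIndex()                      # doctest: +SKIP
    #     TypeError: RangeIndex(...) must be called with integers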
    # --------------------------------------------------------------------

    @cache_readonly
    def _constructor(self) -> type[Int64Index]:
        """return the class to use for construction"""
        return Int64Index

    @cache_readonly
    def _data(self) -> np.ndarray:
        """
        An int array that for performance reasons is created only when needed.

        The constructed array is saved in ``_cache``.
        """
        return np.arange(self.start, self.stop, self.step, dtype=np.int64)

    @cache_readonly
    def _cached_int64index(self) -> Int64Index:
        return Int64Index._simple_new(self._data, name=self.name)

    @property
    def _int64index(self) -> Int64Index:
        # wrap _cached_int64index so we can be sure its name matches self.name
        res = self._cached_int64index
        res._name = self._name
        return res

    def _get_data_as_items(self):
        """return a list of tuples of start, stop, step"""
        rng = self._range
        return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]

    def __reduce__(self):
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (type(self), d), None

    # --------------------------------------------------------------------
    # Rendering Methods

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(("name", ibase.default_pprint(self.name)))
        return attrs

    def _format_data(self, name=None):
        # we are formatting thru the attributes
        return None

    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
        if not len(self._range):
            return header
        first_val_str = str(self._range[0])
        last_val_str = str(self._range[-1])
        max_length = max(len(first_val_str), len(last_val_str))

        return header + [f"{x:<{max_length}}" for x in self._range]
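    # Illustrative sketch (my addition): because ``_format_data`` returns
    # ``None`` and ``_format_attrs`` reports start/stop/step, the repr shows
    # the three range descriptors instead of materialized values.
    #
    #     >>> import pandas as pd
    #     >>> pd.RangeIndex(0, 10, 2)
    #     RangeIndex(start=0, stop=10, step=2)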
\"\"\" rng = self._range return getsizeof(rng)", "| None = -1 ) -> tuple[np.ndarray, RangeIndex]: codes =", "and (start_o + step_s >= start_s) and (end_o - step_s", "calculate parameters for the RangeIndex describing the # intersection disregarding", "EA compat nv.validate_argsort(args, kwargs) if self._range.step > 0: result =", "name: Hashable = None) -> RangeIndex: result = object.__new__(cls) assert", "dtype: Dtype | None = None, names=None, ): name =", "non_empty_indexes = [obj for obj in rng_indexes if len(obj)] for", "np.ndarray: \"\"\" Returns the indices that would sort the index", "if self._range.step > 0: result = np.arange(len(self), dtype=np.intp) else: result", "if overlap.step < 0: overlap = overlap[::-1] if len(overlap) ==", "the union of two Index objects and sorts if possible", "= ops.get_op_result_name(self, other) first = self._range[::-1] if self.step < 0", "was not supplied). \"\"\" # GH 25710 return self._range.start @property", "step or 1) return self._simple_new(new_range, name=self.name) if len(self) == 1:", "Any) -> bool: hash(key) try: key = ensure_python_int(key) except TypeError:", "from \"next\" or alternatively # from the last non-empty index", "``Int64Index`` if not. ``sort=False`` always returns an unsorted ``Int64Index`` ..", "other != 0: if len(self) == 0 or self.start %", "is possible return super()._cmp_method(self, op) return super()._cmp_method(other, op) def _arith_method(self,", "elif (meth == \"min\" and self.step > 0) or (meth", "= len(self) - 1 - locs[valid] return ensure_platform_int(locs) # --------------------------------------------------------------------", "self, indices, axis: int = 0, allow_fill: bool = True,", "abs(self.step) * no_steps def _max_fitting_element(self, upper_limit: int) -> int: \"\"\"Returns", "- step_o <= end_o) ): return type(self)(start_r, end_r + step_o,", "values = np.concatenate([x._values for x in rng_indexes]) result = Int64Index(values)", "return False return key in self._range @property def inferred_type(self) ->", "_ = self._extended_gcd(first.step, second.step) # check whether element sets intersect", "raise KeyError(key) return super().get_loc(key, method=method, tolerance=tolerance) def _get_indexer( self, target:", "@property def _stop(self) -> int: \"\"\" The value of the", "(locs >= 0) & (target_array < stop) locs[~valid] = -1", "\"\"\" Extended Euclidean algorithms to solve Bezout's identity: a*x +", "0 and (start_s - end_o) <= step_s and (start_o -", "Int64Index otherwise. E.g.: indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)", "\"\"\" attrs = self._get_data_as_items() if self.name is not None: attrs.append((\"name\",", "sort=None): # optimized set operation if we have another RangeIndex", "super().get_loc(key, method=method, tolerance=tolerance) def _get_indexer( self, target: Index, method: str", "implementing a monotonic integer range. 
RangeIndex is a memory-saving special", "deep : bool Introspect the data deeply, interrogate `object` dtypes", "self._range.step @property def _step(self) -> int: \"\"\" The value of", "not include memory consumed by elements that are not components", "range # -------------------------------------------------------------------- # Constructors def __new__( cls, start=None, stop=None,", "# -------------------------------------------------------------------- # Indexing Methods @doc(Int64Index.get_loc) def get_loc(self, key, method=None,", "# validate the arguments if com.all_none(start, stop, step): raise TypeError(\"RangeIndex(...)", "bool = True, *args, **kwargs) -> int: \"\"\"The maximum value", "return type(self)(start_r, end_r + step_s, step_s) elif step_s % step_o", "name=res_name) if first is not self._range: new_index = new_index[::-1] return", "RangeIndex: result will be RangeIndex if possible, Int64Index otherwise. E.g.:", "old_r, old_s, old_t def _union(self, other: Index, sort): \"\"\" Form", "step_o) return self._int64index._union(other, sort=sort) def _difference(self, other, sort=None): # optimized", "isinstance(start, RangeIndex): return start.copy(name=name) elif isinstance(start, range): return cls._simple_new(start, name=name)", "be an np.ndarray; GH#22390 return op(self._int64index, other) if op in", "end_o) <= step_s / 2) ): return type(self)(start_r, end_r +", "@property def step(self) -> int: \"\"\" The value of the", "so we need to catch these explicitly return op(self._int64index, other)", "from err raise KeyError(key) return super().get_loc(key, method=method, tolerance=tolerance) def _get_indexer(", "-------------------------------------------------------------------- def _cmp_method(self, other, op): if isinstance(other, RangeIndex) and self._range", "a future version. 
Use RangeIndex.{} \" \"instead\" ) @property def", "other.step < 0: start_o, step_o, end_o = end_o, -step_o, start_o", "and sorts if possible Parameters ---------- other : Index or", "* no_steps def _max_fitting_element(self, upper_limit: int) -> int: \"\"\"Returns the", "difference is not range-like return super()._difference(other, sort=sort) new_index = type(self)._simple_new(new_rng,", "= [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) indexes = [RangeIndex(3), RangeIndex(4,", "if non_consecutive: result = Int64Index(np.concatenate([x._values for x in rng_indexes])) return", "to original locs locs[valid] = len(self) - 1 - locs[valid]", "sort) left = self.difference(other) right = other.difference(self) result = left.union(right)", "-1: return np.nan elif (meth == \"min\" and self.step >", "# Defer to Int64Index implementation return op(self._int64index, other) # TODO:", "= str(self._range[0]) last_val_str = str(self._range[-1]) max_length = max(len(first_val_str), len(last_val_str)) return", "we might be able to get a RangeIndex back, #", "\"\"\" return True @cache_readonly def is_monotonic_increasing(self) -> bool: return self._range.step", "( next_ is not None and rng.start != next_ )", "values Parameters ---------- deep : bool Introspect the data deeply,", "factorize( self, sort: bool = False, na_sentinel: int | None", "use # cheaper alternative gcd, s, _ = self._extended_gcd(first.step, second.step)", "binary op \"\"\" if isinstance(other, ABCTimedeltaIndex): # Defer to TimedeltaIndex", "descriptors if not all(is_integer(x) for x in [rstart, rstop, rstep]):", "result = object.__new__(cls) assert isinstance(values, range) result._range = values result._name", "the `start` parameter (``0`` if this was not supplied). ..", "rng = self._range return [(\"start\", rng.start), (\"stop\", rng.stop), (\"step\", rng.step)]", "value of the `stop` parameter. .. 
    @cache_readonly
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        rng = self._range
        return getsizeof(rng) + sum(
            getsizeof(getattr(rng, attr_name))
            for attr_name in ["start", "stop", "step"]
        )

    def memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        return self.nbytes

    @property
    def dtype(self) -> np.dtype:
        return np.dtype(np.int64)

    @property
    def is_unique(self) -> bool:
        """return if the index has unique values"""
        return True

    @cache_readonly
    def is_monotonic_increasing(self) -> bool:
        return self._range.step > 0 or len(self) <= 1

    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        return self._range.step < 0 or len(self) <= 1

    def __contains__(self, key: Any) -> bool:
        hash(key)
        try:
            key = ensure_python_int(key)
        except TypeError:
            return False
        return key in self._range

    @property
    def inferred_type(self) -> str:
        return "integer"
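    # Illustrative sketch (my addition): ``nbytes`` is derived from the three
    # range descriptors, so it stays essentially constant regardless of index
    # length, unlike a materialized int64 index whose footprint grows
    # linearly. Exact byte counts are CPython- and platform-dependent.
    #
    #     >>> import pandas as pd
    #     >>> pd.RangeIndex(1_000_000).memory_usage() == pd.RangeIndex(10).memory_usage()
    #     True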
    # --------------------------------------------------------------------
    # Indexing Methods

    @doc(Int64Index.get_loc)
    def get_loc(self, key, method=None, tolerance=None):
        if method is None and tolerance is None:
            if is_integer(key) or (is_float(key) and key.is_integer()):
                new_key = int(key)
                try:
                    return self._range.index(new_key)
                except ValueError as err:
                    raise KeyError(key) from err
            raise KeyError(key)
        return super().get_loc(key, method=method, tolerance=tolerance)

    def _get_indexer(
        self,
        target: Index,
        method: str | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> np.ndarray:
        # -> np.ndarray[np.intp]
        if com.any_not_none(method, tolerance, limit):
            return super()._get_indexer(
                target, method=method, tolerance=tolerance, limit=limit
            )

        if self.step > 0:
            start, stop, step = self.start, self.stop, self.step
        else:
            # GH 28678: work on reversed range for simplicity
            reverse = self._range[::-1]
            start, stop, step = reverse.start, reverse.stop, reverse.step

        if not is_signed_integer_dtype(target):
            # checks/conversions/roundings are delegated to general method
            return super()._get_indexer(target, method=method, tolerance=tolerance)

        target_array = np.asarray(target)
        locs = target_array - start
        valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
        locs[~valid] = -1
        locs[valid] = locs[valid] / step

        if step != self.step:
            # We reversed this range: transform to original locs
            locs[valid] = len(self) - 1 - locs[valid]
        return ensure_platform_int(locs)
    # --------------------------------------------------------------------

    def repeat(self, repeats, axis=None) -> Int64Index:
        return self._int64index.repeat(repeats, axis=axis)

    def delete(self, loc) -> Int64Index:  # type: ignore[override]
        return self._int64index.delete(loc)

    def take(
        self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
    ) -> Int64Index:
        with rewrite_exception("Int64Index", type(self).__name__):
            return self._int64index.take(
                indices,
                axis=axis,
                allow_fill=allow_fill,
                fill_value=fill_value,
                **kwargs,
            )

    def tolist(self) -> list[int]:
        return list(self._range)

    @doc(Int64Index.__iter__)
    def __iter__(self):
        yield from self._range

    @doc(Int64Index._shallow_copy)
    def _shallow_copy(self, values, name: Hashable = no_default):
        name = self.name if name is no_default else name

        if values.dtype.kind == "f":
            return Float64Index(values, name=name)
        return Int64Index._simple_new(values, name=name)

    def _view(self: RangeIndex) -> RangeIndex:
        result = type(self)._simple_new(self._range, name=self._name)
        result._cache = self._cache
        return result

    @doc(Int64Index.copy)
    def copy(
        self,
        name: Hashable = None,
        deep: bool = False,
        dtype: Dtype | None = None,
        names=None,
    ):
        name = self._validate_names(name=name, names=names, deep=deep)[0]
        new_index = self._rename(name=name)

        if dtype:
            warnings.warn(
                "parameter dtype is deprecated and will be removed in a future "
                "version. Use the astype method instead.",
                FutureWarning,
                stacklevel=2,
            )
            new_index = new_index.astype(dtype)
        return new_index

    def _minmax(self, meth: str):
        no_steps = len(self) - 1
        if no_steps == -1:
            return np.nan
        elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
            return self.start

        return self.start + self.step * no_steps

    def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The minimum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_min(args, kwargs)
        return self._minmax("min")

    def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The maximum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_max(args, kwargs)
        return self._minmax("max")

    def argsort(self, *args, **kwargs) -> np.ndarray:
        """
        Returns the indices that would sort the index and its
        underlying data.

        Returns
        -------
        np.ndarray[np.intp]

        See Also
        --------
        numpy.ndarray.argsort
        """
        ascending = kwargs.pop("ascending", True)  # EA compat
        nv.validate_argsort(args, kwargs)

        if self._range.step > 0:
            result = np.arange(len(self), dtype=np.intp)
        else:
            result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)

        if not ascending:
            result = result[::-1]
        return result

    def factorize(
        self, sort: bool = False, na_sentinel: int | None = -1
    ) -> tuple[np.ndarray, RangeIndex]:
        codes = np.arange(len(self), dtype=np.intp)
        uniques = self
        if sort and self.step < 0:
            codes = codes[::-1]
            uniques = uniques[::-1]
        return codes, uniques

    def equals(self, other: object) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if isinstance(other, RangeIndex):
            return self._range == other._range
        return super().equals(other)
\"\"\"", "return super().get_loc(key, method=method, tolerance=tolerance) def _get_indexer( self, target: Index, method:", "Int64Index: return self._int64index.repeat(repeats, axis=axis) def delete(self, loc) -> Int64Index: #", "int (default: 1) dtype : np.int64 Unused, accepted for homogeneity", "step_s) if ( (step_s % 2 == 0) and (abs(start_s", "= self.step // other stop = start + len(self) *", "name=self.name) if len(self) == 1: start = self.start // other", "type(self).__name__): return self._int64index.take( indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs, ) def", "self.start, self.step end_s = self.start + self.step * (len(self) -", "When all members of \"indexes\" are of type RangeIndex: result", "\"\"\" warnings.warn( self._deprecation_message.format(\"_start\", \"start\"), FutureWarning, stacklevel=2, ) return self.start @property", "result.rename(name) if step is not None: next_ = rng[-1] +", "def copy( self, name: Hashable = None, deep: bool =", "Index or array-like sort : False or None, default None", "\"\"\" return np.arange(self.start, self.stop, self.step, dtype=np.int64) @cache_readonly def _cached_int64index(self) ->", "start.copy(name=name) elif isinstance(start, range): return cls._simple_new(start, name=name) # validate the", "step : int (default: 1) dtype : np.int64 Unused, accepted", "method=method, tolerance=tolerance, limit=limit ) if self.step > 0: start, stop,", "name) elif len(indexes) == 1: return indexes[0] rng_indexes = cast(List[RangeIndex],", "None = None if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:", "# RangeIndex if isinstance(start, RangeIndex): return start.copy(name=name) elif isinstance(start, range):", "len(self) def __getitem__(self, key): \"\"\" Conserve RangeIndex type for scalar", "self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not isinstance(other, RangeIndex):", "if sort and self.step < 0: codes = codes[::-1] uniques", ") from pandas.core.dtypes.generic import ABCTimedeltaIndex from pandas.core import ops import", "indices, axis: int = 0, allow_fill: bool = True, fill_value=None,", "0 length, i.e. were empty. # In this case return", "deprecated:: 0.25.0 Use ``stop`` instead. \"\"\" # GH 25710 warnings.warn(", "end_o = other.start + other.step * (len(other) - 1) if", "result = result.astype(\"float64\") return result except (ValueError, TypeError, ZeroDivisionError): #", "first_val_str = str(self._range[0]) last_val_str = str(self._range[-1]) max_length = max(len(first_val_str), len(last_val_str))", "other.start) elif len(self) == 1: step_s = step_o elif len(other)", "_get_data_as_items(self): \"\"\" return a list of tuples of start, stop,", "RangeIndex: \"\"\" Create RangeIndex from a range object. Returns -------", "= new_index[::-1] if sort is None: new_index = new_index.sort_values() return", "rstart = op(left.start, right) rstop = op(left.stop, right) result =", "Hashable = None, sort=None): if not isinstance(other, RangeIndex) or sort", "step_s == 0 and (start_o + step_s >= start_s) and", "return Float64Index(values, name=name) return Int64Index._simple_new(values, name=name) def _view(self: RangeIndex) ->", ": The base pandas Index type. Int64Index : Index of", "result = np.arange(len(self), dtype=np.intp) else: result = np.arange(len(self) - 1,", "assert isinstance(values, range) result._range = values result._name = name result._cache", "TypeError(\"RangeIndex(...) 
must be called with integers\") start = ensure_python_int(start) if", "of start, stop, step \"\"\" rng = self._range return [(\"start\",", "extract_array(other, extract_numpy=True, extract_range=True) attrs = self._get_attributes_dict() left, right = self,", "names=None, ): name = self._validate_names(name=name, names=names, deep=deep)[0] new_index = self._rename(name=name)", "sort and self.step < 0: codes = codes[::-1] uniques =", "abs(self.step) return self.start + abs(self.step) * no_steps def _extended_gcd(self, a:", "we need to catch these explicitly return op(self._int64index, other) elif", "start_s, step_s, end_s = end_s, -step_s, start_s if other.step <", "# Method hint: linear Diophantine equation # solve intersection problem", "int(key) try: return self._range[new_key] except IndexError as err: raise IndexError(", "# from the last non-empty index stop = non_empty_indexes[-1].stop if", "a: int, b: int) -> tuple[int, int, int]: \"\"\" Extended", "locs[valid] = locs[valid] / step if step != self.step: #", "`stop` parameter. \"\"\" return self._range.stop @property def _stop(self) -> int:", "\"\"\" if isinstance(other, RangeIndex) and sort is None: start_s, step_s", "cls._simple_new(start, name=name) # validate the arguments if com.all_none(start, stop, step):", "= new_index._min_fitting_element(int_low) new_range = range(new_start, new_index.stop, new_index.step) new_index = self._simple_new(new_range)", "**kwargs) -> np.ndarray: \"\"\" Returns the indices that would sort", "-step_o, start_o if len(self) == 1 and len(other) == 1:", "supplied). \"\"\" # GH 25710 return self._range.start @property def _start(self)", "or (meth == \"max\" and self.step < 0): return self.start", "% step == 0) & (locs >= 0) & (target_array", "(abs(end_s - end_o) <= step_s / 2) ): return type(self)(start_r,", "the `stop` parameter. .. deprecated:: 0.25.0 Use ``stop`` instead. \"\"\"", "int: \"\"\"Returns the smallest element greater than or equal to", "self._name return res def _get_data_as_items(self): \"\"\" return a list of", ".. deprecated:: 0.25.0 Use ``stop`` instead. \"\"\" # GH 25710", "to sort resulting index. ``sort=None`` returns a monotonically increasing ``RangeIndex``", "import annotations from datetime import timedelta import operator from sys", "min(start_s, start_o) end_r = max(end_s, end_o) if step_o == step_s:", "@classmethod def _simple_new(cls, values: range, name: Hashable = None) ->", "numpy / Int64Index # even if we can represent as", "old_t, t = t, old_t - quotient * t return", "if non_empty_indexes: # Get the stop value from \"next\" or", "the `step` parameter (``1`` if this was not supplied). ..", "/ 2) elif step_o % step_s == 0: if (", "raise IndexError( f\"index {key} is out of bounds for axis", "numpy.ndarray.argsort \"\"\" ascending = kwargs.pop(\"ascending\", True) # EA compat nv.validate_argsort(args,", "parameter. .. deprecated:: 0.25.0 Use ``stop`` instead. \"\"\" # GH", "An int array that for performance reasons is created only", "start, stop = 0, start else: stop = ensure_python_int(stop) step", "TimedeltaIndex implementation return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): # GH#19333", "`stop` parameter. .. deprecated:: 0.25.0 Use ``stop`` instead. 
\"\"\" #", "return op(self._int64index, other) # TODO: Do attrs get handled reliably?", "< 0 and other.step < 0) is not (new_index.step <", "rng.start != next_ ) if non_consecutive: result = Int64Index(np.concatenate([x._values for", "== 0) and (abs(start_s - start_o) <= step_s / 2)", "for obj in rng_indexes if len(obj)] for obj in non_empty_indexes:", "the data deeply, interrogate `object` dtypes for system-level memory consumption", "r: quotient = old_r // r old_r, r = r,", "RangeIndex \"\"\" if not isinstance(data, range): raise TypeError( f\"{cls.__name__}(...) must", "s, old_s - quotient * s old_t, t = t,", "indices that would sort the index and its underlying data.", "if stop is None: start, stop = 0, start else:", "the astype method instead.\", FutureWarning, stacklevel=2, ) new_index = new_index.astype(dtype)", "the intersection new_rng = range(first.start, overlap[0], first.step) else: # The", "\"\"\" The value of the `stop` parameter. \"\"\" return self._range.stop", "_minmax(self, meth: str): no_steps = len(self) - 1 if no_steps", "other new_range = range(start, start + 1, 1) return self._simple_new(new_range,", "None, sort=None): if not isinstance(other, RangeIndex) or sort is not", "result_name = self._convert_can_do_setop(other) if not isinstance(other, RangeIndex): return super()._difference(other, sort=sort)", "data deeply, interrogate `object` dtypes for system-level memory consumption Returns", "step = self.start, self.stop, self.step else: # GH 28678: work", "Int64Index(np.concatenate([x._values for x in rng_indexes])) return result.rename(name) if step is", ">= 0) & (target_array < stop) locs[~valid] = -1 locs[valid]", "op): \"\"\" Parameters ---------- other : Any op : callable", "try: return self._range[new_key] except IndexError as err: raise IndexError( f\"index", "return super().symmetric_difference(other, result_name, sort) left = self.difference(other) right = other.difference(self)", "tolerance is None: if is_integer(key) or (is_float(key) and key.is_integer()): new_key", "the last non-empty index stop = non_empty_indexes[-1].stop if next_ is", "Both are immutable so if ._range attr. 
are equal, shortcut", "step_s, step_s) elif step_s % step_o == 0: if (", "are not components of the array if deep=False See Also", "new_key = int(key) try: return self._range.index(new_key) except ValueError as err:", "overlap[0] == first.start: # The difference is everything after the", "thru the attributes return None def _format_with_header(self, header: list[str], na_rep:", "err raise KeyError(key) return super().get_loc(key, method=method, tolerance=tolerance) def _get_indexer( self,", "boolean \" \"arrays are valid indices\" ) # fall back", "could use # cheaper alternative gcd, s, _ = self._extended_gcd(first.step,", "import pandas.core.common as com from pandas.core.construction import extract_array import pandas.core.indexes.base", "int: \"\"\" The value of the `start` parameter (``0`` if", "return RangeIndex(0, 0).rename(name) def __len__(self) -> int: \"\"\" return the", "elif isinstance(start, range): return cls._simple_new(start, name=name) # validate the arguments", "= first.step * second.step // gcd new_range = range(tmp_start, int_high,", "not be zero\") rng = range(start, stop, step) return cls._simple_new(rng,", "a*x + b*y = gcd(x, y) Finds one particular solution", "op(left.stop, right) result = type(self)(rstart, rstop, rstep, **attrs) # for", "[operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: step = op # TODO: if", "other : Any op : callable that accepts 2 params", "of the RangeIndex\"\"\" nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return self._minmax(\"min\") def max(self,", "the user. Parameters ---------- start : int (default: 0), range,", "if not isinstance(data, range): raise TypeError( f\"{cls.__name__}(...) must be called", "\" \"ellipsis (`...`), numpy.newaxis (`None`) \" \"and integer or boolean", "_shallow_copy(self, values, name: Hashable = no_default): name = self.name if", "was not supplied). .. deprecated:: 0.25.0 Use ``step`` instead. \"\"\"", "return self.stop @property def step(self) -> int: \"\"\" The value", "\"\"\" return the length of the RangeIndex \"\"\" return len(self._range)", "Int64Index: return Int64Index._simple_new(self._data, name=self.name) @property def _int64index(self) -> Int64Index: #", "Hashable = None, ) -> RangeIndex: cls._validate_dtype(dtype) name = maybe_extract_name(name,", "0 and (start_o + step_s >= start_s) and (end_o -", "self.step < 0 else self._range overlap = self.intersection(other) if overlap.step", "GH 28678: work on reversed range for simplicity reverse =", "res = self._cached_int64index res._name = self._name return res def _get_data_as_items(self):", "step(left.step, right) # we don't have a representable op #", "self[:0].rename(res_name) if not isinstance(overlap, RangeIndex): # We won't end up", "not None and rng.start != next_ ) if non_consecutive: result", "can represent as a RangeIndex, return # as a Float64Index", "(``0`` if this was not supplied). .. 

class RangeIndex(NumericIndex):
    """
    Immutable Index implementing a monotonic integer range.

    RangeIndex is a memory-saving special case of Int64Index limited to
    representing monotonic ranges. Using RangeIndex may in some instances
    improve computing speed.

    This is the default index type used
    by DataFrame and Series when no explicit index is provided by the user.

    Parameters
    ----------
    start : int (default: 0), range, or other RangeIndex instance
        If int and "stop" is not given, interpreted as "stop" instead.
    stop : int (default: 0)
    step : int (default: 1)
    dtype : np.int64
        Unused, accepted for homogeneity with other index types.
    copy : bool, default False
        Unused, accepted for homogeneity with other index types.
    name : object, optional
        Name to be stored in the index.

    Attributes
    ----------
    start
    stop
    step

    Methods
    -------
    from_range

    See Also
    --------
    Index : The base pandas Index type.
    Int64Index : Index of int64 data.
    """

    _typ = "rangeindex"
    _engine_type = libindex.Int64Engine
    _dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
    _can_hold_na = False
    _range: range

    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        start=None,
        stop=None,
        step=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable = None,
    ) -> RangeIndex:
        cls._validate_dtype(dtype)
        name = maybe_extract_name(name, start, cls)

        # RangeIndex
        if isinstance(start, RangeIndex):
            return start.copy(name=name)
        elif isinstance(start, range):
            return cls._simple_new(start, name=name)

        # validate the arguments
        if com.all_none(start, stop, step):
            raise TypeError("RangeIndex(...) must be called with integers")

        start = ensure_python_int(start) if start is not None else 0

        if stop is None:
            start, stop = 0, start
        else:
            stop = ensure_python_int(stop)

        step = ensure_python_int(step) if step is not None else 1
        if step == 0:
            raise ValueError("Step must not be zero")

        rng = range(start, stop, step)
        return cls._simple_new(rng, name=name)

    @classmethod
    def from_range(
        cls, data: range, name=None, dtype: Dtype | None = None
    ) -> RangeIndex:
        """
        Create RangeIndex from a range object.

        Returns
        -------
        RangeIndex
        """
        if not isinstance(data, range):
            raise TypeError(
                f"{cls.__name__}(...) must be called with object coercible to a "
                f"range, {repr(data)} was passed"
            )
        cls._validate_dtype(dtype)
        return cls._simple_new(data, name=name)

    @classmethod
    def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
        result = object.__new__(cls)

        assert isinstance(values, range)

        result._range = values
        result._name = name
        result._cache = {}
        result._reset_identity()
        return result
\"\"\" # GH 25710 return self._range.step", "rng.step)] def __reduce__(self): d = self._get_attributes_dict() d.update(dict(self._get_data_as_items())) return ibase._new_Index, (type(self),", "% other == 0 and self.step % other == 0:", "while r: quotient = old_r // r old_r, r =", "method: str | None = None, limit: int | None", "(type(self), d), None # -------------------------------------------------------------------- # Rendering Methods def _format_attrs(self):", "= False, name: Hashable = None, ) -> RangeIndex: cls._validate_dtype(dtype)", "step_s / 2) elif step_o % step_s == 0: if", "whether element sets intersect if (first.start - second.start) % gcd:", "self.difference(other) right = other.difference(self) result = left.union(right) if result_name is", "= result.rename(result_name) return result # -------------------------------------------------------------------- def _concat(self, indexes: list[Index],", "step_s % step_o == 0: if ( (start_s - start_o)", "= self._range[::-1] start, stop, step = reverse.start, reverse.stop, reverse.step if", "case return an empty range index. return RangeIndex(0, 0).rename(name) def", "Defer to TimedeltaIndex implementation return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)):", "KeyError(key) from err raise KeyError(key) return super().get_loc(key, method=method, tolerance=tolerance) def", "0 else other._range # check whether intervals intersect # deals", "attrs = self._get_data_as_items() if self.name is not None: attrs.append((\"name\", ibase.default_pprint(self.name)))", "self._cache return result @doc(Int64Index.copy) def copy( self, name: Hashable =", "0) and (abs(start_s - start_o) <= step_s / 2) and", "is_signed_integer_dtype, is_timedelta64_dtype, ) from pandas.core.dtypes.generic import ABCTimedeltaIndex from pandas.core import", "(\"stop\", rng.stop), (\"step\", rng.step)] def __reduce__(self): d = self._get_attributes_dict() d.update(dict(self._get_data_as_items()))", "objects and sorts if possible Parameters ---------- other : Index", "end_o = end_o, -step_o, start_o if len(self) == 1 and", "if self.name is not None: attrs.append((\"name\", ibase.default_pprint(self.name))) return attrs def", "super()._difference(other, sort=sort) if overlap.step != first.step: # In some cases", "\"arrays are valid indices\" ) # fall back to Int64Index", "warnings.warn( self._deprecation_message.format(\"_step\", \"step\"), FutureWarning, stacklevel=2, ) return self.step @cache_readonly def", "0) step : int (default: 1) dtype : np.int64 Unused,", "index. 
return RangeIndex(0, 0).rename(name) def __len__(self) -> int: \"\"\" return", "tolerance, limit): return super()._get_indexer( target, method=method, tolerance=tolerance, limit=limit ) if", "deep=deep)[0] new_index = self._rename(name=name) if dtype: warnings.warn( \"parameter dtype is", "(len(self) - 1) start_o, step_o = other.start, other.step end_o =", "yield from self._range @doc(Int64Index._shallow_copy) def _shallow_copy(self, values, name: Hashable =", "self._int64index._union(other, sort=sort) def _difference(self, other, sort=None): # optimized set operation", "result = Int64Index(values) return result.rename(name) step = rng.start - start", "-1 locs[valid] = locs[valid] / step if step != self.step:", "return type(self)(start_r, end_r + step_s, step_s) if ( (step_s %", "from sys import getsizeof from typing import ( TYPE_CHECKING, Any,", "-------------------------------------------------------------------- def _concat(self, indexes: list[Index], name: Hashable) -> Index: \"\"\"", "consumption Returns ------- bytes used Notes ----- Memory usage does", "= overlap[::-1] if len(overlap) == 0: return self.rename(name=res_name) if len(overlap)", "parent method for the case of all RangeIndex instances. When", "0 not in self._range def any(self, *args, **kwargs) -> bool:", "FutureWarning, stacklevel=2, ) return self.start @property def stop(self) -> int:", "int: \"\"\"The minimum value of the RangeIndex\"\"\" nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs)", "@property def _int64index(self) -> Int64Index: # wrap _cached_int64index so we", "Euclidean algorithms to solve Bezout's identity: a*x + b*y =", "step = rng.step elif step is None: # First non-empty", "self._range] # -------------------------------------------------------------------- _deprecation_message = ( \"RangeIndex.{} is deprecated and", "= self._simple_new(new_range) if (self.step < 0 and other.step < 0)", "-> Int64Index: with rewrite_exception(\"Int64Index\", type(self).__name__): return self._int64index.take( indices, axis=axis, allow_fill=allow_fill,", "2) ): return type(self)(start_r, end_r + step_s / 2, step_s", "attr. are equal, shortcut is possible return super()._cmp_method(self, op) return", "nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return self._minmax(\"min\") def max(self, axis=None, skipna: bool", "get_loc(self, key, method=None, tolerance=None): if method is None and tolerance", "self._deprecation_message.format(\"_step\", \"step\"), FutureWarning, stacklevel=2, ) return self.step @cache_readonly def nbytes(self)", "if other.step < 0 else other._range # check whether intervals", "in a future version. Use RangeIndex.{} \" \"instead\" ) @property", "type for scalar and slice keys. \"\"\" if isinstance(key, slice):", "0, 1 t, old_t = 1, 0 r, old_r =", "_empty_range = range(0) class RangeIndex(NumericIndex): \"\"\" Immutable Index implementing a", "range index. return RangeIndex(0, 0).rename(name) def __len__(self) -> int: \"\"\"", "new_rng = range(overlap[-1] + first.step, first.stop, first.step) elif overlap[-1] ==", "import timedelta import operator from sys import getsizeof from typing", "explicit index is provided by the user. 
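
    # Illustrative sketch (not part of the original source): the private
    # `_data` attribute above materializes the full ndarray lazily and caches
    # it, so a RangeIndex stays cheap until array semantics are required.
    # Shown on a 64-bit Linux build:
    #
    #     >>> idx = pd.RangeIndex(1, 10, 2)
    #     >>> idx._data  # built on first access via np.arange, then cached
    #     array([1, 3, 5, 7, 9])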

    def _get_data_as_items(self):
        """return a list of tuples of start, stop, step"""
        rng = self._range
        return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]

    def __reduce__(self):
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (type(self), d), None

    # --------------------------------------------------------------------
    # Rendering Methods

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(("name", ibase.default_pprint(self.name)))
        return attrs

    def _format_data(self, name=None):
        # we are formatting thru the attributes
        return None

    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
        if not len(self._range):
            return header
        first_val_str = str(self._range[0])
        last_val_str = str(self._range[-1])
        max_length = max(len(first_val_str), len(last_val_str))

        return header + [f"{x:<{max_length}}" for x in self._range]

    # --------------------------------------------------------------------

    _deprecation_message = (
        "RangeIndex.{} is deprecated and will be "
        "removed in a future version. Use RangeIndex.{} "
        "instead"
    )

    @property
    def start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).
        """
        # GH 25710
        return self._range.start

    @property
    def _start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``start`` instead.
        """
        warnings.warn(
            self._deprecation_message.format("_start", "start"),
            FutureWarning,
            stacklevel=2,
        )
        return self.start

    @property
    def stop(self) -> int:
        """
        The value of the `stop` parameter.
        """
        return self._range.stop

    @property
    def _stop(self) -> int:
        """
        The value of the `stop` parameter.

        .. deprecated:: 0.25.0
            Use ``stop`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_stop", "stop"),
            FutureWarning,
            stacklevel=2,
        )
        return self.stop

    @property
    def step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).
        """
        # GH 25710
        return self._range.step

    @property
    def _step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``step`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_step", "step"),
            FutureWarning,
            stacklevel=2,
        )
        return self.step

    @cache_readonly
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        rng = self._range
        return getsizeof(rng) + sum(
            getsizeof(getattr(rng, attr_name))
            for attr_name in ["start", "stop", "step"]
        )

    def memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        return self.nbytes

    @property
    def dtype(self) -> np.dtype:
        return np.dtype(np.int64)

    @property
    def is_unique(self) -> bool:
        """return if the index has unique values"""
        return True

    @cache_readonly
    def is_monotonic_increasing(self) -> bool:
        return self._range.step > 0 or len(self) <= 1

    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        return self._range.step < 0 or len(self) <= 1

    def __contains__(self, key: Any) -> bool:
        hash(key)
        try:
            key = ensure_python_int(key)
        except TypeError:
            return False
        return key in self._range

    @property
    def inferred_type(self) -> str:
        return "integer"

    # --------------------------------------------------------------------
    # Indexing Methods

    @doc(Int64Index.get_loc)
    def get_loc(self, key, method=None, tolerance=None):
        if method is None and tolerance is None:
            if is_integer(key) or (is_float(key) and key.is_integer()):
                new_key = int(key)
                try:
                    return self._range.index(new_key)
                except ValueError as err:
                    raise KeyError(key) from err
            raise KeyError(key)
        return super().get_loc(key, method=method, tolerance=tolerance)

    def _get_indexer(
        self,
        target: Index,
        method: str | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> np.ndarray:
        # -> np.ndarray[np.intp]
        if com.any_not_none(method, tolerance, limit):
            return super()._get_indexer(
                target, method=method, tolerance=tolerance, limit=limit
            )

        if self.step > 0:
            start, stop, step = self.start, self.stop, self.step
        else:
            # GH 28678: work on reversed range for simplicity
            reverse = self._range[::-1]
            start, stop, step = reverse.start, reverse.stop, reverse.step

        if not is_signed_integer_dtype(target):
            # checks/conversions/roundings are delegated to general method
            return super()._get_indexer(target, method=method, tolerance=tolerance)

        target_array = np.asarray(target)
        locs = target_array - start
        valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
        locs[~valid] = -1
        locs[valid] = locs[valid] / step

        if step != self.step:
            # We reversed this range: transform to original locs
            locs[valid] = len(self) - 1 - locs[valid]
        return ensure_platform_int(locs)
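
    # Illustrative get_loc examples (not part of the original source):
    # lookups are pure range arithmetic via range.index, no hash table:
    #
    #     >>> idx = pd.RangeIndex(2, 20, 3)  # 2, 5, 8, 11, 14, 17
    #     >>> idx.get_loc(8)
    #     2
    #     >>> idx.get_loc(9)  # not on the grid, raises KeyError: 9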

    # --------------------------------------------------------------------

    def repeat(self, repeats, axis=None) -> Int64Index:
        return self._int64index.repeat(repeats, axis=axis)

    def delete(self, loc) -> Int64Index:  # type: ignore[override]
        return self._int64index.delete(loc)

    def take(
        self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
    ) -> Int64Index:
        with rewrite_exception("Int64Index", type(self).__name__):
            return self._int64index.take(
                indices,
                axis=axis,
                allow_fill=allow_fill,
                fill_value=fill_value,
                **kwargs,
            )

    def tolist(self) -> list[int]:
        return list(self._range)

    @doc(Int64Index.__iter__)
    def __iter__(self):
        yield from self._range

    @doc(Int64Index._shallow_copy)
    def _shallow_copy(self, values, name: Hashable = no_default):
        name = self.name if name is no_default else name

        if values.dtype.kind == "f":
            return Float64Index(values, name=name)
        return Int64Index._simple_new(values, name=name)

    def _view(self: RangeIndex) -> RangeIndex:
        result = type(self)._simple_new(self._range, name=self._name)
        result._cache = self._cache
        return result

    @doc(Int64Index.copy)
    def copy(
        self,
        name: Hashable = None,
        deep: bool = False,
        dtype: Dtype | None = None,
        names=None,
    ):
        name = self._validate_names(name=name, names=names, deep=deep)[0]
        new_index = self._rename(name=name)

        if dtype:
            warnings.warn(
                "parameter dtype is deprecated and will be removed in a future "
                "version. Use the astype method instead.",
                FutureWarning,
                stacklevel=2,
            )
            new_index = new_index.astype(dtype)
        return new_index

    def _minmax(self, meth: str):
        no_steps = len(self) - 1
        if no_steps == -1:
            return np.nan
        elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
            return self.start

        return self.start + self.step * no_steps

    def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The minimum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_min(args, kwargs)
        return self._minmax("min")

    def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The maximum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_max(args, kwargs)
        return self._minmax("max")

    def argsort(self, *args, **kwargs) -> np.ndarray:
        """
        Returns the indices that would sort the index and its
        underlying data.

        Returns
        -------
        np.ndarray[np.intp]

        See Also
        --------
        numpy.ndarray.argsort
        """
        ascending = kwargs.pop("ascending", True)  # EA compat
        nv.validate_argsort(args, kwargs)

        if self._range.step > 0:
            result = np.arange(len(self), dtype=np.intp)
        else:
            result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)

        if not ascending:
            result = result[::-1]
        return result

    def factorize(
        self, sort: bool = False, na_sentinel: int | None = -1
    ) -> tuple[np.ndarray, RangeIndex]:
        codes = np.arange(len(self), dtype=np.intp)
        uniques = self
        if sort and self.step < 0:
            codes = codes[::-1]
            uniques = uniques[::-1]
        return codes, uniques

    def equals(self, other: object) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if isinstance(other, RangeIndex):
            return self._range == other._range
        return super().equals(other)
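
    # Illustrative examples (not part of the original source): min/max come
    # from the closed form start + step * (len - 1), and argsort never has
    # to sort, it just emits a (possibly reversed) arange:
    #
    #     >>> idx = pd.RangeIndex(10, 0, -2)  # 10, 8, 6, 4, 2
    #     >>> idx.min(), idx.max()
    #     (2, 10)
    #     >>> idx.argsort()
    #     array([4, 3, 2, 1, 0])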
\"\"\" return self._range.stop @property def _stop(self)", "@doc(Int64Index.__iter__) def __iter__(self): yield from self._range @doc(Int64Index._shallow_copy) def _shallow_copy(self, values,", "method=None, tolerance=None): if method is None and tolerance is None:", "@property def _start(self) -> int: \"\"\" The value of the", "dtype=np.intp) else: result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)", "None else 1 if step == 0: raise ValueError(\"Step must", "# We reversed this range: transform to original locs locs[valid]", "start non_consecutive = (step != rng.step and len(rng) > 1)", "deals with in- and decreasing ranges int_low = max(first.start, second.start)", "for identical step sizes, could use # cheaper alternative gcd,", "solve Bezout's identity: a*x + b*y = gcd(x, y) Finds", "rstop, rstep]): result = result.astype(\"float64\") return result except (ValueError, TypeError,", "other) step: Callable | None = None if op in", "from datetime import timedelta import operator from sys import getsizeof", "pandas.core.indexes.numeric import ( Float64Index, Int64Index, NumericIndex, ) from pandas.core.ops.common import", "accepts 2 params perform the binary op \"\"\" if isinstance(other,", "\"only integers, slices (`:`), \" \"ellipsis (`...`), numpy.newaxis (`None`) \"", "start, stop, step \"\"\" rng = self._range return [(\"start\", rng.start),", "\"\"\" Return a list of tuples of the (attr, formatted_value)", "indexes: list[Index], name: Hashable) -> Index: \"\"\" Overriding parent method", "simplicity reverse = self._range[::-1] start, stop, step = reverse.start, reverse.stop,", "range, or other RangeIndex instance If int and \"stop\" is", "uniques = uniques[::-1] return codes, uniques def equals(self, other: object)", "ops.rpow, operator.mod, ops.rmod, ops.rfloordiv, divmod, ops.rdivmod, ]: return op(self._int64index, other)", "= max(first.start, second.start) int_high = min(first.stop, second.stop) if int_high <=", "\"rangeindex\" _engine_type = libindex.Int64Engine _dtype_validation_metadata = (is_signed_integer_dtype, \"signed integer\") _can_hold_na", "step is None: # First non-empty index had only one", "(`:`), \" \"ellipsis (`...`), numpy.newaxis (`None`) \" \"and integer or", "super().equals(other) # -------------------------------------------------------------------- # Set Operations def _intersection(self, other: Index,", "stop step Methods ------- from_range See Also -------- Index :", "FutureWarning, stacklevel=2, ) return self.step @cache_readonly def nbytes(self) -> int:", "end_o) <= step_s and (start_o - end_s) <= step_s ):", "step == 0) & (locs >= 0) & (target_array <", "dtype: Dtype | None = None, copy: bool = False,", "result = left.union(right) if result_name is not None: result =", ": bool, default False Unused, accepted for homogeneity with other", "the stop value from \"next\" or alternatively # from the", "not given, interpreted as \"stop\" instead. 
stop : int (default:", "Methods @doc(Int64Index.get_loc) def get_loc(self, key, method=None, tolerance=None): if method is", "equal to the limit\"\"\" no_steps = -(-(lower_limit - self.start) //", "self._range @doc(Int64Index._shallow_copy) def _shallow_copy(self, values, name: Hashable = no_default): name", ") return self.stop @property def step(self) -> int: \"\"\" The", "a representable op # so return a base index if", "-------------------------------------------------------------------- # Rendering Methods def _format_attrs(self): \"\"\" Return a list", "repeat(self, repeats, axis=None) -> Int64Index: return self._int64index.repeat(repeats, axis=axis) def delete(self,", "-> int: return len(self) def __getitem__(self, key): \"\"\" Conserve RangeIndex", "# Both are immutable so if ._range attr. are equal,", "Hashable = None, deep: bool = False, dtype: Dtype |", "np.ndarray[np.intp] if com.any_not_none(method, tolerance, limit): return super()._get_indexer( target, method=method, tolerance=tolerance,", "-> np.ndarray[np.intp] if com.any_not_none(method, tolerance, limit): return super()._get_indexer( target, method=method,", ") -> Int64Index: with rewrite_exception(\"Int64Index\", type(self).__name__): return self._int64index.take( indices, axis=axis,", "formatting thru the attributes return None def _format_with_header(self, header: list[str],", "return result except (ValueError, TypeError, ZeroDivisionError): # Defer to Int64Index", "return start.copy(name=name) elif isinstance(start, range): return cls._simple_new(start, name=name) # validate", "integers, slices (`:`), \" \"ellipsis (`...`), numpy.newaxis (`None`) \" \"and", "self.step < 0: codes = codes[::-1] uniques = uniques[::-1] return", "elif step_s % step_o == 0: if ( (start_s -", "int and \"stop\" is not given, interpreted as \"stop\" instead.", "return np.dtype(np.int64) @property def is_unique(self) -> bool: \"\"\" return if", "None else next_ return RangeIndex(start, stop, step).rename(name) # Here all", "back to Int64Index return super().__getitem__(key) def _getitem_slice(self: RangeIndex, slobj: slice)", "the first non-empty index start = rng.start if step is", "intersection problem # performance hint: for identical step sizes, could", "ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype, is_timedelta64_dtype, ) from pandas.core.dtypes.generic import", "not components of the array if deep=False See Also --------", "% step_o == 0 and (start_s + step_o >= start_o)", "= first.start + (second.start - first.start) * first.step // gcd", "method is None and tolerance is None: if is_integer(key) or", "= self._name return res def _get_data_as_items(self): \"\"\" return a list", "maximum value of the RangeIndex\"\"\" nv.validate_minmax_axis(axis) nv.validate_max(args, kwargs) return self._minmax(\"max\")", "= 1, 0 r, old_r = b, a while r:", "pandas.core.construction import extract_array import pandas.core.indexes.base as ibase from pandas.core.indexes.base import", "second.step) # check whether element sets intersect if (first.start -", "maybe_extract_name(name, start, cls) # RangeIndex if isinstance(start, RangeIndex): return start.copy(name=name)", "step \"\"\" rng = self._range return [(\"start\", rng.start), (\"stop\", rng.stop),", "op(self._int64index, other) if op in [ operator.pow, ops.rpow, operator.mod, ops.rmod,", "the same elements. 
\"\"\" if isinstance(other, RangeIndex): return self._range ==", "# The difference is not range-like return super()._difference(other, sort=sort) new_index", "end_s = end_s, -step_s, start_s if other.step < 0: start_o,", "rewrite_exception from pandas.core.dtypes.common import ( ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar,", "= left.step with np.errstate(all=\"ignore\"): rstart = op(left.start, right) rstop =", "-> list[int]: return list(self._range) @doc(Int64Index.__iter__) def __iter__(self): yield from self._range", "b: int) -> tuple[int, int, int]: \"\"\" Extended Euclidean algorithms", "range) result._range = values result._name = name result._cache = {}", "range(new_start, new_index.stop, new_index.step) new_index = self._simple_new(new_range) if (self.step < 0", "if start is not None else 0 if stop is", "= range(0) class RangeIndex(NumericIndex): \"\"\" Immutable Index implementing a monotonic", "def is_unique(self) -> bool: \"\"\" return if the index has", "dtype=np.int64) @cache_readonly def _cached_int64index(self) -> Int64Index: return Int64Index._simple_new(self._data, name=self.name) @property", "new_start = new_index._min_fitting_element(int_low) new_range = range(new_start, new_index.stop, new_index.step) new_index =", "if we can represent as a RangeIndex, return # as", "% step_s == 0 and (start_o + step_s >= start_s)", "// other stop = start + len(self) * step new_range", "% 2 == 0) and (abs(start_s - start_o) <= step_s", "else 1 if step == 0: raise ValueError(\"Step must not", "None # Filter the empty indexes non_empty_indexes = [obj for", "sorted ``Int64Index`` if not. ``sort=False`` always returns an unsorted ``Int64Index``", "x in indexes): return super()._concat(indexes, name) elif len(indexes) == 1:", "---------- other : Any op : callable that accepts 2", "step != self.step: # We reversed this range: transform to", "indexes non_empty_indexes = [obj for obj in rng_indexes if len(obj)]", "used by DataFrame and Series when no explicit index is", "/ step if step != self.step: # We reversed this", "t, old_t - quotient * t return old_r, old_s, old_t", "self._simple_new(new_range, name=self.name) return self._int64index // other # -------------------------------------------------------------------- # Reductions", "return self._simple_new(_empty_range) # Method hint: linear Diophantine equation # solve", "of bytes in the underlying data. \"\"\" rng = self._range", "RangeIndex \"\"\" return len(self._range) @property def size(self) -> int: return", "old_r - quotient * r old_s, s = s, old_s", "else: # The difference is not range-like return super()._difference(other, sort=sort)", "- quotient * s old_t, t = t, old_t -", "self._range.step > 0 or len(self) <= 1 @cache_readonly def is_monotonic_decreasing(self)", "checks/conversions/roundings are delegated to general method return super()._get_indexer(target, method=method, tolerance=tolerance)", "be \" \"removed in a future version. 

    def _union(self, other: Index, sort):
        """
        Form the union of two Index objects and sorts if possible

        Parameters
        ----------
        other : Index or array-like

        sort : False or None, default None
            Whether to sort resulting index. ``sort=None`` returns a
            monotonically increasing ``RangeIndex`` if possible or a sorted
            ``Int64Index`` if not. ``sort=False`` always returns an
            unsorted ``Int64Index``

            .. versionadded:: 0.25.0

        Returns
        -------
        union : Index
        """
        if isinstance(other, RangeIndex) and sort is None:
            start_s, step_s = self.start, self.step
            end_s = self.start + self.step * (len(self) - 1)
            start_o, step_o = other.start, other.step
            end_o = other.start + other.step * (len(other) - 1)
            if self.step < 0:
                start_s, step_s, end_s = end_s, -step_s, start_s
            if other.step < 0:
                start_o, step_o, end_o = end_o, -step_o, start_o
            if len(self) == 1 and len(other) == 1:
                step_s = step_o = abs(self.start - other.start)
            elif len(self) == 1:
                step_s = step_o
            elif len(other) == 1:
                step_o = step_s
            start_r = min(start_s, start_o)
            end_r = max(end_s, end_o)
            if step_o == step_s:
                if (
                    (start_s - start_o) % step_s == 0
                    and (start_s - end_o) <= step_s
                    and (start_o - end_s) <= step_s
                ):
                    return type(self)(start_r, end_r + step_s, step_s)
                if (
                    (step_s % 2 == 0)
                    and (abs(start_s - start_o) <= step_s / 2)
                    and (abs(end_s - end_o) <= step_s / 2)
                ):
                    return type(self)(start_r, end_r + step_s / 2, step_s / 2)
            elif step_o % step_s == 0:
                if (
                    (start_o - start_s) % step_s == 0
                    and (start_o + step_s >= start_s)
                    and (end_o - step_s <= end_s)
                ):
                    return type(self)(start_r, end_r + step_s, step_s)
            elif step_s % step_o == 0:
                if (
                    (start_s - start_o) % step_o == 0
                    and (start_s + step_o >= start_o)
                    and (end_s - step_o <= end_o)
                ):
                    return type(self)(start_r, end_r + step_o, step_o)
        return self._int64index._union(other, sort=sort)

    def _difference(self, other, sort=None):
        # optimized set operation if we have another RangeIndex
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name = self._convert_can_do_setop(other)

        if not isinstance(other, RangeIndex):
            return super()._difference(other, sort=sort)

        res_name = ops.get_op_result_name(self, other)

        first = self._range[::-1] if self.step < 0 else self._range
        overlap = self.intersection(other)
        if overlap.step < 0:
            overlap = overlap[::-1]

        if len(overlap) == 0:
            return self.rename(name=res_name)
        if len(overlap) == len(self):
            return self[:0].rename(res_name)
        if not isinstance(overlap, RangeIndex):
            # We won't end up with RangeIndex, so fall back
            return super()._difference(other, sort=sort)
        if overlap.step != first.step:
            # In some cases we might be able to get a RangeIndex back,
            #  but not worth the effort.
            return super()._difference(other, sort=sort)

        if overlap[0] == first.start:
            # The difference is everything after the intersection
            new_rng = range(overlap[-1] + first.step, first.stop, first.step)
        elif overlap[-1] == first[-1]:
            # The difference is everything before the intersection
            new_rng = range(first.start, overlap[0], first.step)
        else:
            # The difference is not range-like
            return super()._difference(other, sort=sort)

        new_index = type(self)._simple_new(new_rng, name=res_name)
        if first is not self._range:
            new_index = new_index[::-1]
        return new_index

    def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
        if not isinstance(other, RangeIndex) or sort is not None:
            return super().symmetric_difference(other, result_name, sort)

        left = self.difference(other)
        right = other.difference(self)
        result = left.union(right)

        if result_name is not None:
            result = result.rename(result_name)
        return result
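
    # Illustrative example (not part of the original source): two compatible
    # grids union back into a single RangeIndex instead of an Int64Index:
    #
    #     >>> pd.RangeIndex(0, 10, 2).union(pd.RangeIndex(10, 20, 2))
    #     RangeIndex(start=0, stop=20, step=2)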

    # --------------------------------------------------------------------

    def _concat(self, indexes: list[Index], name: Hashable) -> Index:
        """
        Overriding parent method for the case of all RangeIndex instances.

        When all members of "indexes" are of type RangeIndex: result will be
        RangeIndex if possible, Int64Index otherwise. E.g.:
        indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
        indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
        """
        if not all(isinstance(x, RangeIndex) for x in indexes):
            return super()._concat(indexes, name)

        elif len(indexes) == 1:
            return indexes[0]

        rng_indexes = cast(List[RangeIndex], indexes)

        start = step = next_ = None

        # Filter the empty indexes
        non_empty_indexes = [obj for obj in rng_indexes if len(obj)]

        for obj in non_empty_indexes:
            rng = obj._range

            if start is None:
                # This is set by the first non-empty index
                start = rng.start
                if step is None and len(rng) > 1:
                    step = rng.step

            elif step is None:
                # First non-empty index had only one element
                if rng.start == start:
                    values = np.concatenate([x._values for x in rng_indexes])
                    result = Int64Index(values)
                    return result.rename(name)

                step = rng.start - start

            non_consecutive = (step != rng.step and len(rng) > 1) or (
                next_ is not None and rng.start != next_
            )
            if non_consecutive:
                result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
                return result.rename(name)

            if step is not None:
                next_ = rng[-1] + step

        if non_empty_indexes:
            # Get the stop value from "next" or alternatively
            # from the last non-empty index
            stop = non_empty_indexes[-1].stop if next_ is None else next_
            return RangeIndex(start, stop, step).rename(name)

        # Here all "indexes" had 0 length, i.e. were empty.
        # In this case return an empty range index.
        return RangeIndex(0, 0).rename(name)
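
    # Illustrative examples (not part of the original source), mirroring the
    # docstring above via Index.append, which funnels into _concat:
    #
    #     >>> pd.RangeIndex(3).append(pd.RangeIndex(3, 6))
    #     RangeIndex(start=0, stop=6, step=1)
    #     >>> pd.RangeIndex(3).append(pd.RangeIndex(4, 6))
    #     Int64Index([0, 1, 2, 4, 5], dtype='int64')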

    def __len__(self) -> int:
        """
        return the length of the RangeIndex
        """
        return len(self._range)

    @property
    def size(self) -> int:
        return len(self)

    def __getitem__(self, key):
        """
        Conserve RangeIndex type for scalar and slice keys.
        """
        if isinstance(key, slice):
            new_range = self._range[key]
            return self._simple_new(new_range, name=self._name)
        elif is_integer(key):
            new_key = int(key)
            try:
                return self._range[new_key]
            except IndexError as err:
                raise IndexError(
                    f"index {key} is out of bounds for axis 0 with size {len(self)}"
                ) from err
        elif is_scalar(key):
            raise IndexError(
                "only integers, slices (`:`), "
                "ellipsis (`...`), numpy.newaxis (`None`) "
                "and integer or boolean "
                "arrays are valid indices"
            )
        # fall back to Int64Index
        return super().__getitem__(key)

    def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
        """
        Fastpath for __getitem__ when we know we have a slice.
        """
        res = self._range[slobj]
        return type(self)._simple_new(res, name=self._name)

    @unpack_zerodim_and_defer("__floordiv__")
    def __floordiv__(self, other):

        if is_integer(other) and other != 0:
            if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
                start = self.start // other
                step = self.step // other
                stop = start + len(self) * step
                new_range = range(start, stop, step or 1)
                return self._simple_new(new_range, name=self.name)
            if len(self) == 1:
                start = self.start // other
                new_range = range(start, start + 1, 1)
                return self._simple_new(new_range, name=self.name)
        return self._int64index // other
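
    # Illustrative examples (not part of the original source): floor division
    # stays a RangeIndex only when start and step divide evenly:
    #
    #     >>> pd.RangeIndex(0, 20, 4) // 2
    #     RangeIndex(start=0, stop=10, step=2)
    #     >>> pd.RangeIndex(0, 20, 4) // 3
    #     Int64Index([0, 1, 2, 4, 5], dtype='int64')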
Parameters ---------- start : int (default: 0), range, or", "RangeIndex, so fall back return super()._difference(other, sort=sort) if overlap.step !=", "len(other): return self._simple_new(_empty_range) first = self._range[::-1] if self.step < 0", "start=None, stop=None, step=None, dtype: Dtype | None = None, copy:", "super()._difference(other, sort=sort) new_index = type(self)._simple_new(new_rng, name=res_name) if first is not", "the underlying data. \"\"\" rng = self._range return getsizeof(rng) +", "-> type[Int64Index]: \"\"\" return the class to use for construction", "None: next_ = rng[-1] + step if non_empty_indexes: # Get", "quotient = old_r // r old_r, r = r, old_r", "name matches self.name res = self._cached_int64index res._name = self._name return", ") -> tuple[np.ndarray, RangeIndex]: codes = np.arange(len(self), dtype=np.intp) uniques =", "elements. \"\"\" if isinstance(other, RangeIndex): return self._range == other._range return", "than or equal to the limit\"\"\" no_steps = -(-(lower_limit -", "axis 0 with size {len(self)}\" ) from err elif is_scalar(key):", "other) if op in [ operator.pow, ops.rpow, operator.mod, ops.rmod, ops.rfloordiv,", "& (locs >= 0) & (target_array < stop) locs[~valid] =", "if step is not None else 1 if step ==", "_constructor(self) -> type[Int64Index]: \"\"\" return the class to use for", "| None = None if op in [operator.mul, ops.rmul, operator.truediv,", "from pandas.core import ops import pandas.core.common as com from pandas.core.construction", "None: attrs.append((\"name\", ibase.default_pprint(self.name))) return attrs def _format_data(self, name=None): # we", "stop is None: start, stop = 0, start else: stop", "_get_indexer( self, target: Index, method: str | None = None,", "@classmethod def from_range( cls, data: range, name=None, dtype: Dtype |", "value of the RangeIndex\"\"\" nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return self._minmax(\"min\") def", "if possible, Int64Index otherwise. E.g.: indexes = [RangeIndex(3), RangeIndex(3, 6)]", "_format_with_header(self, header: list[str], na_rep: str = \"NaN\") -> list[str]: if", "for the case of all RangeIndex instances. When all members", "np.arange(len(self) - 1, -1, -1, dtype=np.intp) if not ascending: result", "True on timedelta64, # so we need to catch these", "\"\"\" Create RangeIndex from a range object. Returns ------- RangeIndex", "next_ = None # Filter the empty indexes non_empty_indexes =", "memory_usage(self, deep: bool = False) -> int: \"\"\" Memory usage", "a list of tuples of start, stop, step \"\"\" rng", "for x, y: s, t Returns: gcd, s, t \"\"\"", "get a RangeIndex back, # but not worth the effort.", "non_empty_indexes[-1].stop if next_ is None else next_ return RangeIndex(start, stop,", "if other is a RangeIndex we may have more efficient", "libindex from pandas._libs.lib import no_default from pandas._typing import Dtype from", "we have float-like descriptors if not all(is_integer(x) for x in", "tolerance=tolerance) target_array = np.asarray(target) locs = target_array - start valid", "if we have another RangeIndex self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name =", "to be stored in the index. Attributes ---------- start stop", "== \"min\" and self.step > 0) or (meth == \"max\"", "won't end up with RangeIndex, so fall back return super()._difference(other,", "be RangeIndex if possible, Int64Index otherwise. 
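A short usage sketch of the constructor contract spelled out in the docstring above; it is illustrative only, not part of the module, and assumes a pandas 1.x installation (the line this source comes from):

import pandas as pd

# A single positional integer is interpreted as "stop", like the built-in range().
idx = pd.RangeIndex(5)
print(idx)                        # RangeIndex(start=0, stop=5, step=1)

# start/stop/step behave exactly like range(); name is stored on the index.
ids = pd.RangeIndex(start=2, stop=12, step=3, name="ids")
print(list(ids))                  # [2, 5, 8, 11]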
    # --------------------------------------------------------------------
    # Constructors

    def __new__(
        cls,
        start=None,
        stop=None,
        step=None,
        dtype: Dtype | None = None,
        copy: bool = False,
        name: Hashable = None,
    ) -> RangeIndex:
        cls._validate_dtype(dtype)
        name = maybe_extract_name(name, start, cls)

        # RangeIndex
        if isinstance(start, RangeIndex):
            return start.copy(name=name)
        elif isinstance(start, range):
            return cls._simple_new(start, name=name)

        # validate the arguments
        if com.all_none(start, stop, step):
            raise TypeError("RangeIndex(...) must be called with integers")

        start = ensure_python_int(start) if start is not None else 0

        if stop is None:
            start, stop = 0, start
        else:
            stop = ensure_python_int(stop)

        step = ensure_python_int(step) if step is not None else 1
        if step == 0:
            raise ValueError("Step must not be zero")

        rng = range(start, stop, step)
        return cls._simple_new(rng, name=name)

    @classmethod
    def from_range(
        cls, data: range, name=None, dtype: Dtype | None = None
    ) -> RangeIndex:
        """
        Create RangeIndex from a range object.

        Returns
        -------
        RangeIndex
        """
        if not isinstance(data, range):
            raise TypeError(
                f"{cls.__name__}(...) must be called with object coercible to a "
                f"range, {repr(data)} was passed"
            )
        cls._validate_dtype(dtype)
        return cls._simple_new(data, name=name)

    @classmethod
    def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
        result = object.__new__(cls)

        assert isinstance(values, range)

        result._range = values
        result._name = name
        result._cache = {}
        result._reset_identity()
        return result

    # --------------------------------------------------------------------

    @cache_readonly
    def _constructor(self) -> type[Int64Index]:
        """ return the class to use for construction """
        return Int64Index

    @cache_readonly
    def _data(self) -> np.ndarray:
        """
        An int array that for performance reasons is created only when needed.

        The constructed array is saved in ``_cache``.
        """
        return np.arange(self.start, self.stop, self.step, dtype=np.int64)

    @cache_readonly
    def _cached_int64index(self) -> Int64Index:
        return Int64Index._simple_new(self._data, name=self.name)

    @property
    def _int64index(self) -> Int64Index:
        # wrap _cached_int64index so we can be sure its name matches self.name
        res = self._cached_int64index
        res._name = self._name
        return res

    def _get_data_as_items(self):
        """ return a list of tuples of start, stop, step """
        rng = self._range
        return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]

    def __reduce__(self):
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (type(self), d), None

    # --------------------------------------------------------------------
    # Rendering Methods

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(("name", ibase.default_pprint(self.name)))
        return attrs

    def _format_data(self, name=None):
        # we are formatting thru the attributes
        return None

    def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
        if not len(self._range):
            return header
        first_val_str = str(self._range[0])
        last_val_str = str(self._range[-1])
        max_length = max(len(first_val_str), len(last_val_str))

        return header + [f"{x:<{max_length}}" for x in self._range]
    # --------------------------------------------------------------------

    _deprecation_message = (
        "RangeIndex.{} is deprecated and will be "
        "removed in a future version. Use RangeIndex.{} "
        "instead"
    )

    @property
    def start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).
        """
        # GH 25710
        return self._range.start

    @property
    def _start(self) -> int:
        """
        The value of the `start` parameter (``0`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``start`` instead.
        """
        warnings.warn(
            self._deprecation_message.format("_start", "start"),
            FutureWarning,
            stacklevel=2,
        )
        return self.start

    @property
    def stop(self) -> int:
        """
        The value of the `stop` parameter.
        """
        return self._range.stop

    @property
    def _stop(self) -> int:
        """
        The value of the `stop` parameter.

        .. deprecated:: 0.25.0
            Use ``stop`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_stop", "stop"),
            FutureWarning,
            stacklevel=2,
        )
        return self.stop

    @property
    def step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).
        """
        # GH 25710
        return self._range.step

    @property
    def _step(self) -> int:
        """
        The value of the `step` parameter (``1`` if this was not supplied).

        .. deprecated:: 0.25.0
            Use ``step`` instead.
        """
        # GH 25710
        warnings.warn(
            self._deprecation_message.format("_step", "step"),
            FutureWarning,
            stacklevel=2,
        )
        return self.step

    @cache_readonly
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        rng = self._range
        return getsizeof(rng) + sum(
            getsizeof(getattr(rng, attr_name))
            for attr_name in ["start", "stop", "step"]
        )

    def memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        return self.nbytes

    @property
    def dtype(self) -> np.dtype:
        return np.dtype(np.int64)

    @property
    def is_unique(self) -> bool:
        """ return if the index has unique values """
        return True

    @cache_readonly
    def is_monotonic_increasing(self) -> bool:
        return self._range.step > 0 or len(self) <= 1

    @cache_readonly
    def is_monotonic_decreasing(self) -> bool:
        return self._range.step < 0 or len(self) <= 1

    def __contains__(self, key: Any) -> bool:
        hash(key)
        try:
            key = ensure_python_int(key)
        except TypeError:
            return False
        return key in self._range

    @property
    def inferred_type(self) -> str:
        return "integer"

    # --------------------------------------------------------------------
    # Indexing Methods

    @doc(Int64Index.get_loc)
    def get_loc(self, key, method=None, tolerance=None):
        if method is None and tolerance is None:
            if is_integer(key) or (is_float(key) and key.is_integer()):
                new_key = int(key)
                try:
                    return self._range.index(new_key)
                except ValueError as err:
                    raise KeyError(key) from err
            raise KeyError(key)
        return super().get_loc(key, method=method, tolerance=tolerance)

    def _get_indexer(
        self,
        target: Index,
        method: str | None = None,
        limit: int | None = None,
        tolerance=None,
    ) -> np.ndarray:
        # -> np.ndarray[np.intp]
        if com.any_not_none(method, tolerance, limit):
            return super()._get_indexer(
                target, method=method, tolerance=tolerance, limit=limit
            )

        if self.step > 0:
            start, stop, step = self.start, self.stop, self.step
        else:
            # GH 28678: work on reversed range for simplicity
            reverse = self._range[::-1]
            start, stop, step = reverse.start, reverse.stop, reverse.step

        if not is_signed_integer_dtype(target):
            # checks/conversions/roundings are delegated to the general method
            return super()._get_indexer(target, method=method, tolerance=tolerance)

        target_array = np.asarray(target)
        locs = target_array - start
        valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
        locs[~valid] = -1
        locs[valid] = locs[valid] / step

        if step != self.step:
            # We reversed this range: transform to original locs
            locs[valid] = len(self) - 1 - locs[valid]
        return ensure_platform_int(locs)
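The constant-time behavior these methods buy is easy to see in a standalone sketch (illustrative; the exact byte count depends on the Python build):

import pandas as pd

big = pd.RangeIndex(1_000_000)

# Membership and label lookup use range arithmetic, not a materialized array.
print(999_999 in big)            # True
print(big.get_loc(123_456))      # 123456

# nbytes/memory_usage count only the range object and its three ints,
# so the figure stays on the order of a hundred bytes rather than ~8 MB.
print(big.memory_usage())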
    # --------------------------------------------------------------------

    def repeat(self, repeats, axis=None) -> Int64Index:
        return self._int64index.repeat(repeats, axis=axis)

    def delete(self, loc) -> Int64Index:  # type: ignore[override]
        return self._int64index.delete(loc)

    def take(
        self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
    ) -> Int64Index:
        with rewrite_exception("Int64Index", type(self).__name__):
            return self._int64index.take(
                indices,
                axis=axis,
                allow_fill=allow_fill,
                fill_value=fill_value,
                **kwargs,
            )

    def tolist(self) -> list[int]:
        return list(self._range)

    @doc(Int64Index.__iter__)
    def __iter__(self):
        yield from self._range

    @doc(Int64Index._shallow_copy)
    def _shallow_copy(self, values, name: Hashable = no_default):
        name = self.name if name is no_default else name

        if values.dtype.kind == "f":
            return Float64Index(values, name=name)
        return Int64Index._simple_new(values, name=name)

    def _view(self: RangeIndex) -> RangeIndex:
        result = type(self)._simple_new(self._range, name=self._name)
        result._cache = self._cache
        return result

    @doc(Int64Index.copy)
    def copy(
        self,
        name: Hashable = None,
        deep: bool = False,
        dtype: Dtype | None = None,
        names=None,
    ):
        name = self._validate_names(name=name, names=names, deep=deep)[0]
        new_index = self._rename(name=name)

        if dtype:
            warnings.warn(
                "parameter dtype is deprecated and will be removed in a future "
                "version. Use the astype method instead.",
                FutureWarning,
                stacklevel=2,
            )
            new_index = new_index.astype(dtype)
        return new_index

    def _minmax(self, meth: str):
        no_steps = len(self) - 1
        if no_steps == -1:
            return np.nan
        elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
            return self.start

        return self.start + self.step * no_steps

    def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The minimum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_min(args, kwargs)
        return self._minmax("min")

    def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """The maximum value of the RangeIndex"""
        nv.validate_minmax_axis(axis)
        nv.validate_max(args, kwargs)
        return self._minmax("max")

    def argsort(self, *args, **kwargs) -> np.ndarray:
        """
        Returns the indices that would sort the index and its
        underlying data.

        Returns
        -------
        np.ndarray[np.intp]

        See Also
        --------
        numpy.ndarray.argsort
        """
        ascending = kwargs.pop("ascending", True)  # EA compat
        nv.validate_argsort(args, kwargs)

        if self._range.step > 0:
            result = np.arange(len(self), dtype=np.intp)
        else:
            result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)

        if not ascending:
            result = result[::-1]
        return result

    def factorize(
        self, sort: bool = False, na_sentinel: int | None = -1
    ) -> tuple[np.ndarray, RangeIndex]:
        codes = np.arange(len(self), dtype=np.intp)
        uniques = self
        if sort and self.step < 0:
            codes = codes[::-1]
            uniques = uniques[::-1]
        return codes, uniques

    def equals(self, other: object) -> bool:
        """
        Determines if two Index objects contain the same elements.
        """
        if isinstance(other, RangeIndex):
            return self._range == other._range
        return super().equals(other)
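A small sketch of how these reductions behave on a decreasing range (standalone, illustrative):

import pandas as pd

desc = pd.RangeIndex(start=10, stop=0, step=-2)   # 10, 8, 6, 4, 2

# min/max come straight from start/step/len, with no scan of the values.
print(desc.min(), desc.max())                     # 2 10

# argsort knows a negative-step range is already sorted in reverse order.
print(desc.argsort())                             # [4 3 2 1 0]

# equals compares the underlying range objects directly.
print(desc.equals(pd.RangeIndex(10, 0, -2)))      # True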
    # --------------------------------------------------------------------
    # Set Operations

    def _intersection(self, other: Index, sort=False):
        if not isinstance(other, RangeIndex):
            # Int64Index
            return super()._intersection(other, sort=sort)

        if not len(self) or not len(other):
            return self._simple_new(_empty_range)

        first = self._range[::-1] if self.step < 0 else self._range
        second = other._range[::-1] if other.step < 0 else other._range

        # check whether intervals intersect
        # deals with in- and decreasing ranges
        int_low = max(first.start, second.start)
        int_high = min(first.stop, second.stop)
        if int_high <= int_low:
            return self._simple_new(_empty_range)

        # Method hint: linear Diophantine equation
        # solve intersection problem
        # performance hint: for identical step sizes, could use
        # cheaper alternative
        gcd, s, _ = self._extended_gcd(first.step, second.step)

        # check whether element sets intersect
        if (first.start - second.start) % gcd:
            return self._simple_new(_empty_range)

        # calculate parameters for the RangeIndex describing the
        # intersection disregarding the lower bounds
        tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
        new_step = first.step * second.step // gcd
        new_range = range(tmp_start, int_high, new_step)
        new_index = self._simple_new(new_range)

        # adjust index to limiting interval
        new_start = new_index._min_fitting_element(int_low)
        new_range = range(new_start, new_index.stop, new_index.step)
        new_index = self._simple_new(new_range)

        if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
            new_index = new_index[::-1]
        if sort is None:
            new_index = new_index.sort_values()

        return new_index

    def _min_fitting_element(self, lower_limit: int) -> int:
        """Returns the smallest element greater than or equal to the limit"""
        no_steps = -(-(lower_limit - self.start) // abs(self.step))
        return self.start + abs(self.step) * no_steps

    def _max_fitting_element(self, upper_limit: int) -> int:
        """Returns the largest element smaller than or equal to the limit"""
        no_steps = (upper_limit - self.start) // abs(self.step)
        return self.start + abs(self.step) * no_steps

    def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
        """
        Extended Euclidean algorithms to solve Bezout's identity:
           a*x + b*y = gcd(x, y)
        Finds one particular solution for x, y: s, t
        Returns: gcd, s, t
        """
        s, old_s = 0, 1
        t, old_t = 1, 0
        r, old_r = b, a
        while r:
            quotient = old_r // r
            old_r, r = r, old_r - quotient * r
            old_s, s = s, old_s - quotient * s
            old_t, t = t, old_t - quotient * t
        return old_r, old_s, old_t

    def _union(self, other: Index, sort):
        """
        Form the union of two Index objects and sorts if possible

        Parameters
        ----------
        other : Index or array-like

        sort : False or None, default None
            Whether to sort resulting index. ``sort=None`` returns a
            monotonically increasing ``RangeIndex`` if possible or a sorted
            ``Int64Index`` if not. ``sort=False`` always returns an
            unsorted ``Int64Index``

            .. versionadded:: 0.25.0

        Returns
        -------
        union : Index
        """
        if isinstance(other, RangeIndex) and sort is None:
            start_s, step_s = self.start, self.step
            end_s = self.start + self.step * (len(self) - 1)
            start_o, step_o = other.start, other.step
            end_o = other.start + other.step * (len(other) - 1)
            if self.step < 0:
                start_s, step_s, end_s = end_s, -step_s, start_s
            if other.step < 0:
                start_o, step_o, end_o = end_o, -step_o, start_o
            if len(self) == 1 and len(other) == 1:
                step_s = step_o = abs(self.start - other.start)
            elif len(self) == 1:
                step_s = step_o
            elif len(other) == 1:
                step_o = step_s
            start_r = min(start_s, start_o)
            end_r = max(end_s, end_o)
            if step_o == step_s:
                if (
                    (start_s - start_o) % step_s == 0
                    and (start_s - end_o) <= step_s
                    and (start_o - end_s) <= step_s
                ):
                    return type(self)(start_r, end_r + step_s, step_s)
                if (
                    (step_s % 2 == 0)
                    and (abs(start_s - start_o) <= step_s / 2)
                    and (abs(end_s - end_o) <= step_s / 2)
                ):
                    return type(self)(start_r, end_r + step_s / 2, step_s / 2)
            elif step_o % step_s == 0:
                if (
                    (start_o - start_s) % step_s == 0
                    and (start_o + step_s >= start_s)
                    and (end_o - step_s <= end_s)
                ):
                    return type(self)(start_r, end_r + step_s, step_s)
            elif step_s % step_o == 0:
                if (
                    (start_s - start_o) % step_o == 0
                    and (start_s + step_o >= start_o)
                    and (end_s - step_o <= end_o)
                ):
                    return type(self)(start_r, end_r + step_o, step_o)
        return self._int64index._union(other, sort=sort)

    def _difference(self, other, sort=None):
        # optimized set operation if we have another RangeIndex
        self._validate_sort_keyword(sort)
        self._assert_can_do_setop(other)
        other, result_name = self._convert_can_do_setop(other)

        if not isinstance(other, RangeIndex):
            return super()._difference(other, sort=sort)

        res_name = ops.get_op_result_name(self, other)

        first = self._range[::-1] if self.step < 0 else self._range
        overlap = self.intersection(other)
        if overlap.step < 0:
            overlap = overlap[::-1]

        if len(overlap) == 0:
            return self.rename(name=res_name)
        if len(overlap) == len(self):
            return self[:0].rename(res_name)
        if not isinstance(overlap, RangeIndex):
            # We won't end up with RangeIndex, so fall back
            return super()._difference(other, sort=sort)
        if overlap.step != first.step:
            # In some cases we might be able to get a RangeIndex back,
            # but not worth the effort.
            return super()._difference(other, sort=sort)

        if overlap[0] == first.start:
            # The difference is everything after the intersection
            new_rng = range(overlap[-1] + first.step, first.stop, first.step)
        elif overlap[-1] == first[-1]:
            # The difference is everything before the intersection
            new_rng = range(first.start, overlap[0], first.step)
        else:
            # The difference is not range-like
            return super()._difference(other, sort=sort)

        new_index = type(self)._simple_new(new_rng, name=res_name)
        if first is not self._range:
            new_index = new_index[::-1]
        return new_index

    def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
        if not isinstance(other, RangeIndex) or sort is not None:
            return super().symmetric_difference(other, result_name, sort)

        left = self.difference(other)
        right = other.difference(self)
        result = left.union(right)

        if result_name is not None:
            result = result.rename(result_name)
        return result
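To make the Diophantine step concrete: intersecting range(0, 30, 4) with range(6, 30, 6) asks for integers of the form 4*i that also equal 6 + 6*j. Since (0 - 6) is divisible by gcd(4, 6) = 2, solutions exist, and they form a progression with step lcm(4, 6) = 12. A standalone sketch (illustrative; exact reprs as produced on the pandas 1.x line):

import pandas as pd

a = pd.RangeIndex(0, 30, 4)      # 0, 4, 8, ..., 28
b = pd.RangeIndex(6, 30, 6)      # 6, 12, 18, 24

# Common elements form a new arithmetic progression with step 12.
print(a.intersection(b))         # RangeIndex(start=12, stop=30, step=12)

# _union likewise stays a RangeIndex when the two ranges mesh cleanly.
print(pd.RangeIndex(0, 5).union(pd.RangeIndex(5, 10)))
# RangeIndex(start=0, stop=10, step=1)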
\"\"\" if isinstance(key,", ") if self.step > 0: start, stop, step = self.start,", "result = result.rename(result_name) return result # -------------------------------------------------------------------- def _concat(self, indexes:", "< 0): return self.start return self.start + self.step * no_steps", "some cases we might be able to get a RangeIndex", "method=method, tolerance=tolerance) target_array = np.asarray(target) locs = target_array - start", "self.step: # We reversed this range: transform to original locs", "\"\"\" if isinstance(other, RangeIndex): return self._range == other._range return super().equals(other)", "= type(self)(rstart, rstop, rstep, **attrs) # for compat with numpy", "0: result = np.arange(len(self), dtype=np.intp) else: result = np.arange(len(self) -", "type(self)(start_r, end_r + step_s, step_s) elif step_s % step_o ==", "we have an override if step: with np.errstate(all=\"ignore\"): rstep =", "adjust index to limiting interval new_start = new_index._min_fitting_element(int_low) new_range =", "else name if values.dtype.kind == \"f\": return Float64Index(values, name=name) return", "step: with np.errstate(all=\"ignore\"): rstep = step(left.step, right) # we don't", "len(overlap) == len(self): return self[:0].rename(res_name) if not isinstance(overlap, RangeIndex): #", "immutable so if ._range attr. are equal, shortcut is possible", "if first is not self._range: new_index = new_index[::-1] return new_index", "int: return len(self) def __getitem__(self, key): \"\"\" Conserve RangeIndex type", "len(self) <= 1 def __contains__(self, key: Any) -> bool: hash(key)", "s, t Returns: gcd, s, t \"\"\" s, old_s =", "start_o, step_o, end_o = end_o, -step_o, start_o if len(self) ==", "= None, sort=None): if not isinstance(other, RangeIndex) or sort is", "usage of my values Parameters ---------- deep : bool Introspect", "bool: \"\"\" Determines if two Index objects contain the same", "== 0: if ( (start_o - start_s) % step_s ==", "RangeIndex.{} \" \"instead\" ) @property def start(self) -> int: \"\"\"", "stacklevel=2, ) return self.start @property def stop(self) -> int: \"\"\"", "-> Int64Index: # wrap _cached_int64index so we can be sure", "in ``_cache``. \"\"\" return np.arange(self.start, self.stop, self.step, dtype=np.int64) @cache_readonly def", "no_default from pandas._typing import Dtype from pandas.compat.numpy import function as", "interval new_start = new_index._min_fitting_element(int_low) new_range = range(new_start, new_index.stop, new_index.step) new_index", "return self.start + self.step * no_steps def min(self, axis=None, skipna:", "copy( self, name: Hashable = None, deep: bool = False,", "the number of bytes in the underlying data. \"\"\" rng", "step_s == 0 and (start_s - end_o) <= step_s and", "return Int64Index._simple_new(self._data, name=self.name) @property def _int64index(self) -> Int64Index: # wrap", "gcd, s, t \"\"\" s, old_s = 0, 1 t,", "sort): \"\"\" Form the union of two Index objects and", "cls._validate_dtype(dtype) name = maybe_extract_name(name, start, cls) # RangeIndex if isinstance(start,", "self.step < 0 else self._range second = other._range[::-1] if other.step", "name = maybe_extract_name(name, start, cls) # RangeIndex if isinstance(start, RangeIndex):", "in self._range] # -------------------------------------------------------------------- _deprecation_message = ( \"RangeIndex.{} is deprecated", "had only one element if rng.start == start: values =", "effort. 
return super()._difference(other, sort=sort) if overlap[0] == first.start: # The", "of two Index objects and sorts if possible Parameters ----------", "step_o == 0 and (start_s + step_o >= start_o) and", "step if non_empty_indexes: # Get the stop value from \"next\"", "_view(self: RangeIndex) -> RangeIndex: result = type(self)._simple_new(self._range, name=self._name) result._cache =", "): return type(self)(start_r, end_r + step_o, step_o) return self._int64index._union(other, sort=sort)", "._range attr. are equal, shortcut is possible return super()._cmp_method(self, op)", "to representing monotonic ranges. Using RangeIndex may in some instances", "( cache_readonly, doc, ) from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.common", "RangeIndex is a memory-saving special case of Int64Index limited to", "Attributes ---------- start stop step Methods ------- from_range See Also", "rng_indexes if len(obj)] for obj in non_empty_indexes: rng = obj._range", "= next_ = None # Filter the empty indexes non_empty_indexes", ") return self.step @cache_readonly def nbytes(self) -> int: \"\"\" Return", "sort resulting index. ``sort=None`` returns a monotonically increasing ``RangeIndex`` if", "0) & (locs >= 0) & (target_array < stop) locs[~valid]", "is None: # First non-empty index had only one element", "def _union(self, other: Index, sort): \"\"\" Form the union of", "greater than or equal to the limit\"\"\" no_steps = -(-(lower_limit", "def factorize( self, sort: bool = False, na_sentinel: int |", "a base index if not is_integer(rstep) or not rstep: raise", "1: step_s = step_o = abs(self.start - other.start) elif len(self)", "= self._get_attributes_dict() left, right = self, other try: # apply", "overlap = self.intersection(other) if overlap.step < 0: overlap = overlap[::-1]", "everything before the intersection new_rng = range(first.start, overlap[0], first.step) else:", "values.dtype.kind == \"f\": return Float64Index(values, name=name) return Int64Index._simple_new(values, name=name) def", "old_s - quotient * s old_t, t = t, old_t", "other, op): if isinstance(other, RangeIndex) and self._range == other._range: #", "op) return super()._cmp_method(other, op) def _arith_method(self, other, op): \"\"\" Parameters", "an override if step: with np.errstate(all=\"ignore\"): rstep = step(left.step, right)", "range for simplicity reverse = self._range[::-1] start, stop, step =", "@doc(Int64Index.copy) def copy( self, name: Hashable = None, deep: bool", "): return type(self)(start_r, end_r + step_s, step_s) if ( (step_s", "integer\") _can_hold_na = False _range: range # -------------------------------------------------------------------- # Constructors", "method instead.\", FutureWarning, stacklevel=2, ) new_index = new_index.astype(dtype) return new_index", "was passed\" ) cls._validate_dtype(dtype) return cls._simple_new(data, name=name) @classmethod def _simple_new(cls,", "from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.common import ( ensure_platform_int, ensure_python_int,", "0 and self.step % other == 0: start = self.start", "a RangeIndex, return # as a Float64Index if we have", "tuples of start, stop, step \"\"\" rng = self._range return", "GH#22390 return op(self._int64index, other) if op in [ operator.pow, ops.rpow,", "28678: work on reversed range for simplicity reverse = self._range[::-1]", "| None = None ) -> RangeIndex: \"\"\" Create RangeIndex", "cache_readonly, doc, ) from pandas.util._exceptions import rewrite_exception from 
pandas.core.dtypes.common import", "rng.stop), (\"step\", rng.step)] def __reduce__(self): d = self._get_attributes_dict() d.update(dict(self._get_data_as_items())) return", "with other index types. name : object, optional Name to", "numpy.newaxis (`None`) \" \"and integer or boolean \" \"arrays are", "None and tolerance is None: if is_integer(key) or (is_float(key) and", "types. name : object, optional Name to be stored in", "Int64Index # even if we can represent as a RangeIndex,", "wrap _cached_int64index so we can be sure its name matches", "# -------------------------------------------------------------------- # Reductions def all(self, *args, **kwargs) -> bool:", "max_length = max(len(first_val_str), len(last_val_str)) return header + [f\"{x:<{max_length}}\" for x", "start_o) % step_s == 0 and (start_s - end_o) <=", "# GH 25710 return self._range.start @property def _start(self) -> int:", "step(self) -> int: \"\"\" The value of the `step` parameter", "self._int64index.delete(loc) def take( self, indices, axis: int = 0, allow_fill:", "= libindex.Int64Engine _dtype_validation_metadata = (is_signed_integer_dtype, \"signed integer\") _can_hold_na = False", "non_consecutive: result = Int64Index(np.concatenate([x._values for x in rng_indexes])) return result.rename(name)", "warnings.warn( \"parameter dtype is deprecated and will be removed in", "repeats, axis=None) -> Int64Index: return self._int64index.repeat(repeats, axis=axis) def delete(self, loc)", "int64 data. \"\"\" _typ = \"rangeindex\" _engine_type = libindex.Int64Engine _dtype_validation_metadata", "def _step(self) -> int: \"\"\" The value of the `step`", "a list of tuples of the (attr, formatted_value) \"\"\" attrs", "a while r: quotient = old_r // r old_r, r", "header + [f\"{x:<{max_length}}\" for x in self._range] # -------------------------------------------------------------------- _deprecation_message", "zero\") rng = range(start, stop, step) return cls._simple_new(rng, name=name) @classmethod", "(``0`` if this was not supplied). \"\"\" # GH 25710", "return self._simple_new(new_range, name=self._name) elif is_integer(key): new_key = int(key) try: return", "particular solution for x, y: s, t Returns: gcd, s,", "key in self._range @property def inferred_type(self) -> str: return \"integer\"", "is provided by the user. Parameters ---------- start : int", "to TimedeltaIndex implementation return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): #", "**kwargs ) -> Int64Index: with rewrite_exception(\"Int64Index\", type(self).__name__): return self._int64index.take( indices,", "rstep: raise ValueError else: rstep = left.step with np.errstate(all=\"ignore\"): rstart", "except ValueError as err: raise KeyError(key) from err raise KeyError(key)", "old_s, old_t def _union(self, other: Index, sort): \"\"\" Form the", "(end_s - step_o <= end_o) ): return type(self)(start_r, end_r +", "super()._difference(other, sort=sort) if overlap[0] == first.start: # The difference is", "will be removed in a future \" \"version. Use the", "new_index[::-1] return new_index def symmetric_difference(self, other, result_name: Hashable = None,", "-> RangeIndex: result = object.__new__(cls) assert isinstance(values, range) result._range =", "interpreted as \"stop\" instead. 
stop : int (default: 0) step", "attrs = self._get_attributes_dict() left, right = self, other try: #", "timedelta64, # so we need to catch these explicitly return", "the smallest element greater than or equal to the limit\"\"\"", "False _range: range # -------------------------------------------------------------------- # Constructors def __new__( cls,", "# GH 25710 warnings.warn( self._deprecation_message.format(\"_step\", \"step\"), FutureWarning, stacklevel=2, ) return", "\"\"\"The minimum value of the RangeIndex\"\"\" nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return", "to limiting interval new_start = new_index._min_fitting_element(int_low) new_range = range(new_start, new_index.stop,", "res = self._range[slobj] return type(self)._simple_new(res, name=self._name) @unpack_zerodim_and_defer(\"__floordiv__\") def __floordiv__(self, other):", "overlap[-1] == first[-1]: # The difference is everything before the", "start_o) and (end_s - step_o <= end_o) ): return type(self)(start_r,", "= object.__new__(cls) assert isinstance(values, range) result._range = values result._name =", "self._range return getsizeof(rng) + sum( getsizeof(getattr(rng, attr_name)) for attr_name in", "ranges. Using RangeIndex may in some instances improve computing speed.", "np.asarray(target) locs = target_array - start valid = (locs %", "Defer to Int64Index implementation return op(self._int64index, other) # TODO: Do", "is not None: attrs.append((\"name\", ibase.default_pprint(self.name))) return attrs def _format_data(self, name=None):", "no_steps = len(self) - 1 if no_steps == -1: return", "and (end_s - step_o <= end_o) ): return type(self)(start_r, end_r", "possible or a sorted ``Int64Index`` if not. ``sort=False`` always returns", "in rng_indexes if len(obj)] for obj in non_empty_indexes: rng =", "elif len(self) == 1: step_s = step_o elif len(other) ==", "% other == 0: start = self.start // other step", "The value of the `stop` parameter. \"\"\" return self._range.stop @property", "def _cached_int64index(self) -> Int64Index: return Int64Index._simple_new(self._data, name=self.name) @property def _int64index(self)", "abs(self.start - other.start) elif len(self) == 1: step_s = step_o", "ValueError else: rstep = left.step with np.errstate(all=\"ignore\"): rstart = op(left.start,", ": callable that accepts 2 params perform the binary op", "have a representable op # so return a base index", "needed. The constructed array is saved in ``_cache``. \"\"\" return", "= step_s start_r = min(start_s, start_o) end_r = max(end_s, end_o)", "-------------------------------------------------------------------- @cache_readonly def _constructor(self) -> type[Int64Index]: \"\"\" return the class", "int) -> int: \"\"\"Returns the smallest element greater than or", "x in [rstart, rstop, rstep]): result = result.astype(\"float64\") return result", "the length of the RangeIndex \"\"\" return len(self._range) @property def", "Return a list of tuples of the (attr, formatted_value) \"\"\"", "monotonic integer range. RangeIndex is a memory-saving special case of", "name = self.name if name is no_default else name if", "_step(self) -> int: \"\"\" The value of the `step` parameter", "the default index type used by DataFrame and Series when", "not supplied). 
\"\"\" # GH 25710 return self._range.start @property def", "return key in self._range @property def inferred_type(self) -> str: return", "in self._range def any(self, *args, **kwargs) -> bool: return any(self._range)", "(abs(start_s - start_o) <= step_s / 2) and (abs(end_s -", "if sort is None: new_index = new_index.sort_values() return new_index def", "self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not isinstance(other, RangeIndex): return", "\"start\"), FutureWarning, stacklevel=2, ) return self.start @property def stop(self) ->", "instead. \"\"\" warnings.warn( self._deprecation_message.format(\"_start\", \"start\"), FutureWarning, stacklevel=2, ) return self.start", "self.step * no_steps def min(self, axis=None, skipna: bool = True,", "= np.asarray(target) locs = target_array - start valid = (locs", "False, name: Hashable = None, ) -> RangeIndex: cls._validate_dtype(dtype) name", "reversed range for simplicity reverse = self._range[::-1] start, stop, step", "// abs(self.step)) return self.start + abs(self.step) * no_steps def _max_fitting_element(self,", "quotient * s old_t, t = t, old_t - quotient", "gcd * s new_step = first.step * second.step // gcd", "super()._difference(other, sort=sort) res_name = ops.get_op_result_name(self, other) first = self._range[::-1] if", "x in rng_indexes])) return result.rename(name) if step is not None:", "intersection new_rng = range(overlap[-1] + first.step, first.stop, first.step) elif overlap[-1]", "we can be sure its name matches self.name res =", "= None, deep: bool = False, dtype: Dtype | None", "np.errstate(all=\"ignore\"): rstep = step(left.step, right) # we don't have a", "* (len(self) - 1) start_o, step_o = other.start, other.step end_o", "these explicitly return op(self._int64index, other) elif is_timedelta64_dtype(other): # Must be", "-------- Index : The base pandas Index type. Int64Index :", "0), range, or other RangeIndex instance If int and \"stop\"", "second.stop) if int_high <= int_low: return self._simple_new(_empty_range) # Method hint:", "RangeIndex): return start.copy(name=name) elif isinstance(start, range): return cls._simple_new(start, name=name) #", "if start is None: # This is set by the", "second = other._range[::-1] if other.step < 0 else other._range #", "def _shallow_copy(self, values, name: Hashable = no_default): name = self.name", "_int64index(self) -> Int64Index: # wrap _cached_int64index so we can be", "= ensure_python_int(step) if step is not None else 1 if", "more efficient options other = extract_array(other, extract_numpy=True, extract_range=True) attrs =", "return self._range.step < 0 or len(self) <= 1 def __contains__(self,", "@doc(Int64Index.get_loc) def get_loc(self, key, method=None, tolerance=None): if method is None", "Int64Index @cache_readonly def _data(self) -> np.ndarray: \"\"\" An int array", "scalar and slice keys. 
\"\"\" if isinstance(key, slice): new_range =", "lower_limit: int) -> int: \"\"\"Returns the smallest element greater than", "unique values \"\"\" return True @cache_readonly def is_monotonic_increasing(self) -> bool:", "limit\"\"\" no_steps = (upper_limit - self.start) // abs(self.step) return self.start", "is not (new_index.step < 0): new_index = new_index[::-1] if sort", "new_range = self._range[key] return self._simple_new(new_range, name=self._name) elif is_integer(key): new_key =", "= end_s, -step_s, start_s if other.step < 0: start_o, step_o,", "return np.arange(self.start, self.stop, self.step, dtype=np.int64) @cache_readonly def _cached_int64index(self) -> Int64Index:", "Get the stop value from \"next\" or alternatively # from", "s, old_s = 0, 1 t, old_t = 1, 0", "out of bounds for axis 0 with size {len(self)}\" )", "identity: a*x + b*y = gcd(x, y) Finds one particular", "i.e. were empty. # In this case return an empty", "stacklevel=2, ) return self.stop @property def step(self) -> int: \"\"\"", "True) # EA compat nv.validate_argsort(args, kwargs) if self._range.step > 0:", "-> int: \"\"\" The value of the `step` parameter (``1``", "int | None = None, tolerance=None, ) -> np.ndarray: #", "len(self) - 1 if no_steps == -1: return np.nan elif", "kwargs) if self._range.step > 0: result = np.arange(len(self), dtype=np.intp) else:", "------- union : Index \"\"\" if isinstance(other, RangeIndex) and sort", "other) first = self._range[::-1] if self.step < 0 else self._range", "as \"stop\" instead. stop : int (default: 0) step :", "None = None, tolerance=None, ) -> np.ndarray: # -> np.ndarray[np.intp]", "the `start` parameter (``0`` if this was not supplied). \"\"\"", "\"\"\" # GH 25710 warnings.warn( self._deprecation_message.format(\"_stop\", \"stop\"), FutureWarning, stacklevel=2, )", "as ibase from pandas.core.indexes.base import maybe_extract_name from pandas.core.indexes.numeric import (", "from pandas.core.construction import extract_array import pandas.core.indexes.base as ibase from pandas.core.indexes.base", "0 or len(self) <= 1 def __contains__(self, key: Any) ->", "----- Memory usage does not include memory consumed by elements", "else: result = np.arange(len(self) - 1, -1, -1, dtype=np.intp) if", "new_step) new_index = self._simple_new(new_range) # adjust index to limiting interval", "a monotonically increasing ``RangeIndex`` if possible or a sorted ``Int64Index``", "-------------------------------------------------------------------- def repeat(self, repeats, axis=None) -> Int64Index: return self._int64index.repeat(repeats, axis=axis)", "(locs % step == 0) & (locs >= 0) &", "(self.step < 0 and other.step < 0) is not (new_index.step", "\"\"\" # GH 25710 warnings.warn( self._deprecation_message.format(\"_step\", \"step\"), FutureWarning, stacklevel=2, )", "RangeIndex): return super()._difference(other, sort=sort) res_name = ops.get_op_result_name(self, other) first =", "pandas.util._decorators import ( cache_readonly, doc, ) from pandas.util._exceptions import rewrite_exception", "disregarding the lower bounds tmp_start = first.start + (second.start -", "RangeIndex(6) indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5]) \"\"\" if", "return ensure_platform_int(locs) # -------------------------------------------------------------------- def repeat(self, repeats, axis=None) -> Int64Index:", "value of the `step` parameter (``1`` if this was not", "In this case return an empty range index. return RangeIndex(0,", "other index types. 
name : object, optional Name to be", "def max(self, axis=None, skipna: bool = True, *args, **kwargs) ->", "the lower bounds tmp_start = first.start + (second.start - first.start)", "# but not worth the effort. return super()._difference(other, sort=sort) if", "if ( (step_s % 2 == 0) and (abs(start_s -", "# type: ignore[override] return self._int64index.delete(loc) def take( self, indices, axis:", "elif len(other) == 1: step_o = step_s start_r = min(start_s,", "_cached_int64index(self) -> Int64Index: return Int64Index._simple_new(self._data, name=self.name) @property def _int64index(self) ->", "if not all(isinstance(x, RangeIndex) for x in indexes): return super()._concat(indexes,", "/ 2) and (abs(end_s - end_o) <= step_s / 2)", "result def factorize( self, sort: bool = False, na_sentinel: int", "type used by DataFrame and Series when no explicit index", "== 0 and (start_o + step_s >= start_s) and (end_o", "RangeIndex\"\"\" nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return self._minmax(\"min\") def max(self, axis=None, skipna:", "limited to representing monotonic ranges. Using RangeIndex may in some", "-------------------------------------------------------------------- # Set Operations def _intersection(self, other: Index, sort=False): if", "@cache_readonly def _cached_int64index(self) -> Int64Index: return Int64Index._simple_new(self._data, name=self.name) @property def", "self._range == other._range return super().equals(other) # -------------------------------------------------------------------- # Set Operations", "first = self._range[::-1] if self.step < 0 else self._range second", "end_r + step_s, step_s) elif step_s % step_o == 0:", "end_o) if step_o == step_s: if ( (start_s - start_o)", "set operation if we have another RangeIndex self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other,", "\"\"\" # GH 25710 return self._range.start @property def _start(self) ->", "right) result = type(self)(rstart, rstop, rstep, **attrs) # for compat", "0) or (meth == \"max\" and self.step < 0): return", "-> int: \"\"\"Returns the smallest element greater than or equal", "skipna: bool = True, *args, **kwargs) -> int: \"\"\"The minimum", "== start: values = np.concatenate([x._values for x in rng_indexes]) result", "name : object, optional Name to be stored in the", ": bool Introspect the data deeply, interrogate `object` dtypes for", "import ops import pandas.core.common as com from pandas.core.construction import extract_array", "isinstance(other, RangeIndex): # Int64Index return super()._intersection(other, sort=sort) if not len(self)", "not None else 1 if step == 0: raise ValueError(\"Step", "interrogate `object` dtypes for system-level memory consumption Returns ------- bytes", "# The difference is everything before the intersection new_rng =", "\"step\"] ) def memory_usage(self, deep: bool = False) -> int:", "na_sentinel: int | None = -1 ) -> tuple[np.ndarray, RangeIndex]:", "the binary op \"\"\" if isinstance(other, ABCTimedeltaIndex): # Defer to", "ascending = kwargs.pop(\"ascending\", True) # EA compat nv.validate_argsort(args, kwargs) if", "== len(self): return self[:0].rename(res_name) if not isinstance(overlap, RangeIndex): # We", "self._int64index // other # -------------------------------------------------------------------- # Reductions def all(self, *args,", "> 1) or ( next_ is not None and rng.start", "operation if we have another RangeIndex self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name", "ops.rmod, 
ops.rfloordiv, divmod, ops.rdivmod, ]: return op(self._int64index, other) step: Callable", "0 with size {len(self)}\" ) from err elif is_scalar(key): raise", "if isinstance(other, ABCTimedeltaIndex): # Defer to TimedeltaIndex implementation return NotImplemented", "RangeIndex we may have more efficient options other = extract_array(other,", "except IndexError as err: raise IndexError( f\"index {key} is out", "new_index.stop, new_index.step) new_index = self._simple_new(new_range) if (self.step < 0 and", "if is_integer(key) or (is_float(key) and key.is_integer()): new_key = int(key) try:", "of the (attr, formatted_value) \"\"\" attrs = self._get_data_as_items() if self.name", "all(isinstance(x, RangeIndex) for x in indexes): return super()._concat(indexes, name) elif", "step = ensure_python_int(step) if step is not None else 1", "is not range-like return super()._difference(other, sort=sort) new_index = type(self)._simple_new(new_rng, name=res_name)", "for homogeneity with other index types. copy : bool, default", "str | None = None, limit: int | None =", "0).rename(name) def __len__(self) -> int: \"\"\" return the length of", "self._simple_new(_empty_range) # Method hint: linear Diophantine equation # solve intersection", "left = self.difference(other) right = other.difference(self) result = left.union(right) if", "options other = extract_array(other, extract_numpy=True, extract_range=True) attrs = self._get_attributes_dict() left,", "not is_integer(rstep) or not rstep: raise ValueError else: rstep =", "0 if stop is None: start, stop = 0, start", "<= end_s) ): return type(self)(start_r, end_r + step_s, step_s) elif", "so we can be sure its name matches self.name res", "<= end_o) ): return type(self)(start_r, end_r + step_o, step_o) return", "except (ValueError, TypeError, ZeroDivisionError): # Defer to Int64Index implementation return", "RangeIndex describing the # intersection disregarding the lower bounds tmp_start", "the attributes return None def _format_with_header(self, header: list[str], na_rep: str", "cheaper alternative gcd, s, _ = self._extended_gcd(first.step, second.step) # check", "and key.is_integer()): new_key = int(key) try: return self._range.index(new_key) except ValueError", "(target_array < stop) locs[~valid] = -1 locs[valid] = locs[valid] /", ") def memory_usage(self, deep: bool = False) -> int: \"\"\"", "end_r + step_o, step_o) return self._int64index._union(other, sort=sort) def _difference(self, other,", "and self.step < 0: codes = codes[::-1] uniques = uniques[::-1]", "== 0: start = self.start // other step = self.step", "# deals with in- and decreasing ranges int_low = max(first.start,", "op) def _arith_method(self, other, op): \"\"\" Parameters ---------- other :", "@property def dtype(self) -> np.dtype: return np.dtype(np.int64) @property def is_unique(self)", "\"removed in a future version. 
Use RangeIndex.{} \" \"instead\" )", "from pandas import Index _empty_range = range(0) class RangeIndex(NumericIndex): \"\"\"", "1 def __contains__(self, key: Any) -> bool: hash(key) try: key", "import pandas.core.indexes.base as ibase from pandas.core.indexes.base import maybe_extract_name from pandas.core.indexes.numeric", "equals(self, other: object) -> bool: \"\"\" Determines if two Index", "last non-empty index stop = non_empty_indexes[-1].stop if next_ is None", "return type(self)(start_r, end_r + step_s / 2, step_s / 2)", "as com from pandas.core.construction import extract_array import pandas.core.indexes.base as ibase", "start else: stop = ensure_python_int(stop) step = ensure_python_int(step) if step", "= range(overlap[-1] + first.step, first.stop, first.step) elif overlap[-1] == first[-1]:", "return super()._intersection(other, sort=sort) if not len(self) or not len(other): return", "smallest element greater than or equal to the limit\"\"\" no_steps", "if not len(self) or not len(other): return self._simple_new(_empty_range) first =", "ascending: result = result[::-1] return result def factorize( self, sort:", "RangeIndex self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_name = self._convert_can_do_setop(other) if not isinstance(other,", "alternatively # from the last non-empty index stop = non_empty_indexes[-1].stop", "s = s, old_s - quotient * s old_t, t", "return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): # GH#19333 is_integer evaluated", "step_s = self.start, self.step end_s = self.start + self.step *", "other._range: # Both are immutable so if ._range attr. are", "def nbytes(self) -> int: \"\"\" Return the number of bytes", "[RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6) indexes = [RangeIndex(3), RangeIndex(4, 6)]", "op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: step = op #", "def stop(self) -> int: \"\"\" The value of the `stop`", "{key} is out of bounds for axis 0 with size", "RangeIndex): # Int64Index return super()._intersection(other, sort=sort) if not len(self) or", "new_index = new_index.sort_values() return new_index def _min_fitting_element(self, lower_limit: int) ->", "result._cache = {} result._reset_identity() return result # -------------------------------------------------------------------- @cache_readonly def", "codes[::-1] uniques = uniques[::-1] return codes, uniques def equals(self, other:", "np.ndarray: # -> np.ndarray[np.intp] if com.any_not_none(method, tolerance, limit): return super()._get_indexer(", "): name = self._validate_names(name=name, names=names, deep=deep)[0] new_index = self._rename(name=name) if", "and (abs(end_s - end_o) <= step_s / 2) ): return", "warnings.warn( self._deprecation_message.format(\"_stop\", \"stop\"), FutureWarning, stacklevel=2, ) return self.stop @property def", "Any, Callable, Hashable, List, cast, ) import warnings import numpy", "of the array if deep=False See Also -------- numpy.ndarray.nbytes \"\"\"", "\"\"\" An int array that for performance reasons is created", "else: stop = ensure_python_int(stop) step = ensure_python_int(step) if step is", "= target_array - start valid = (locs % step ==", ") @property def start(self) -> int: \"\"\" The value of", "self.intersection(other) if overlap.step < 0: overlap = overlap[::-1] if len(overlap)", "(``1`` if this was not supplied). .. 
deprecated:: 0.25.0 Use", "return indexes[0] rng_indexes = cast(List[RangeIndex], indexes) start = step =", "overlap.step < 0: overlap = overlap[::-1] if len(overlap) == 0:", "slices (`:`), \" \"ellipsis (`...`), numpy.newaxis (`None`) \" \"and integer", "+ self.step * no_steps def min(self, axis=None, skipna: bool =", "is saved in ``_cache``. \"\"\" return np.arange(self.start, self.stop, self.step, dtype=np.int64)", "= codes[::-1] uniques = uniques[::-1] return codes, uniques def equals(self,", "represent as a RangeIndex, return # as a Float64Index if", "if result_name is not None: result = result.rename(result_name) return result", "last_val_str = str(self._range[-1]) max_length = max(len(first_val_str), len(last_val_str)) return header +", "ValueError(\"Step must not be zero\") rng = range(start, stop, step)", "None = None, copy: bool = False, name: Hashable =", "from pandas._typing import Dtype from pandas.compat.numpy import function as nv", "x in rng_indexes]) result = Int64Index(values) return result.rename(name) step =", "def dtype(self) -> np.dtype: return np.dtype(np.int64) @property def is_unique(self) ->", "if values.dtype.kind == \"f\": return Float64Index(values, name=name) return Int64Index._simple_new(values, name=name)", "range(0) class RangeIndex(NumericIndex): \"\"\" Immutable Index implementing a monotonic integer", "Callable | None = None if op in [operator.mul, ops.rmul,", "from pandas.util._decorators import ( cache_readonly, doc, ) from pandas.util._exceptions import", "if step: with np.errstate(all=\"ignore\"): rstep = step(left.step, right) # we", "In some cases we might be able to get a", "np.ndarray; GH#22390 return op(self._int64index, other) if op in [ operator.pow,", "self.start + abs(self.step) * no_steps def _max_fitting_element(self, upper_limit: int) ->", "must not be zero\") rng = range(start, stop, step) return", "return self._simple_new(_empty_range) first = self._range[::-1] if self.step < 0 else", "import ( Float64Index, Int64Index, NumericIndex, ) from pandas.core.ops.common import unpack_zerodim_and_defer", "is deprecated and will be \" \"removed in a future", "# We won't end up with RangeIndex, so fall back", "\"and integer or boolean \" \"arrays are valid indices\" )", "consumed by elements that are not components of the array", "isinstance(key, slice): new_range = self._range[key] return self._simple_new(new_range, name=self._name) elif is_integer(key):", "or None, default None Whether to sort resulting index. ``sort=None``", "= np.arange(len(self), dtype=np.intp) else: result = np.arange(len(self) - 1, -1,", "\"ellipsis (`...`), numpy.newaxis (`None`) \" \"and integer or boolean \"", "!= first.step: # In some cases we might be able", "describing the # intersection disregarding the lower bounds tmp_start =", "- quotient * t return old_r, old_s, old_t def _union(self,", "deprecated and will be \" \"removed in a future version.", "parameter (``1`` if this was not supplied). .. 
deprecated:: 0.25.0", "self._int64index.repeat(repeats, axis=axis) def delete(self, loc) -> Int64Index: # type: ignore[override]", "1) return self._simple_new(new_range, name=self.name) return self._int64index // other # --------------------------------------------------------------------", "self._range.start @property def _start(self) -> int: \"\"\" The value of", "is a memory-saving special case of Int64Index limited to representing", "None if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: step =", "+ step_s, step_s) if ( (step_s % 2 == 0)", "self.rename(name=res_name) if len(overlap) == len(self): return self[:0].rename(res_name) if not isinstance(overlap,", "other, result_name = self._convert_can_do_setop(other) if not isinstance(other, RangeIndex): return super()._difference(other,", "_cached_int64index so we can be sure its name matches self.name", "= reverse.start, reverse.stop, reverse.step if not is_signed_integer_dtype(target): # checks/conversions/roundings are", "The difference is everything before the intersection new_rng = range(first.start,", "on reversed range for simplicity reverse = self._range[::-1] start, stop,", "return res def _get_data_as_items(self): \"\"\" return a list of tuples", "True @cache_readonly def is_monotonic_increasing(self) -> bool: return self._range.step > 0", "start = ensure_python_int(start) if start is not None else 0", "# -------------------------------------------------------------------- # Rendering Methods def _format_attrs(self): \"\"\" Return a", "`start` parameter (``0`` if this was not supplied). .. deprecated::", "name=name) def _view(self: RangeIndex) -> RangeIndex: result = type(self)._simple_new(self._range, name=self._name)", "to catch these explicitly return op(self._int64index, other) elif is_timedelta64_dtype(other): #" ]
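# A minimal usage sketch (not part of the pandas module above; it assumes only
# the public pandas API): it shows why RangeIndex is memory-saving -- the index
# stores just start/stop/step, and slicing or intersecting compatible ranges
# yields another RangeIndex instead of materializing an Int64Index.
import pandas as pd

idx = pd.RangeIndex(start=0, stop=10, step=2)    # values 0, 2, 4, 6, 8
print(idx.start, idx.stop, idx.step)             # 0 10 2
print(idx[1:4])                                  # RangeIndex(start=2, stop=8, step=2)
print(idx.intersection(pd.RangeIndex(4, 20)))    # RangeIndex(start=4, stop=10, step=2)
print(idx.nbytes)                                # constant, independent of the length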
[ "forward(self, x): out = self.l1(x) out = self.relu(out) out =", "self.relu(out) out = self.l4(out) # no activation and no softmax", "out = self.relu(out) out = self.l4(out) # no activation and", "out = self.l2(out) out = self.relu(out) out = self.l3(out) out", "nn.Linear(hidden_size, hidden_size) self.l3 = nn.Linear(hidden_size, hidden_size) self.l4 = nn.Linear(hidden_size, num_classes)", "self.relu = nn.ReLU() def forward(self, x): out = self.l1(x) out", "= self.relu(out) out = self.l2(out) out = self.relu(out) out =", "as nn class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(NeuralNet,", "out = self.l1(x) out = self.relu(out) out = self.l2(out) out", "self.l4(out) # no activation and no softmax at the end", "no activation and no softmax at the end return out", "NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(NeuralNet, self).__init__() self.l1 =", "= self.relu(out) out = self.l3(out) out = self.relu(out) out =", "self.l3(out) out = self.relu(out) out = self.l4(out) # no activation", "= nn.Linear(input_size, hidden_size) self.l2 = nn.Linear(hidden_size, hidden_size) self.l3 = nn.Linear(hidden_size,", "self.l4 = nn.Linear(hidden_size, num_classes) self.relu = nn.ReLU() def forward(self, x):", "x): out = self.l1(x) out = self.relu(out) out = self.l2(out)", "hidden_size) self.l2 = nn.Linear(hidden_size, hidden_size) self.l3 = nn.Linear(hidden_size, hidden_size) self.l4", "self.l1(x) out = self.relu(out) out = self.l2(out) out = self.relu(out)", "out = self.relu(out) out = self.l2(out) out = self.relu(out) out", "self.l2(out) out = self.relu(out) out = self.l3(out) out = self.relu(out)", "nn.Linear(hidden_size, hidden_size) self.l4 = nn.Linear(hidden_size, num_classes) self.relu = nn.ReLU() def", "out = self.relu(out) out = self.l3(out) out = self.relu(out) out", "class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(NeuralNet, self).__init__() self.l1", "torch.nn as nn class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_classes):", "def __init__(self, input_size, hidden_size, num_classes): super(NeuralNet, self).__init__() self.l1 = nn.Linear(input_size,", "= self.relu(out) out = self.l4(out) # no activation and no", "num_classes): super(NeuralNet, self).__init__() self.l1 = nn.Linear(input_size, hidden_size) self.l2 = nn.Linear(hidden_size,", "__init__(self, input_size, hidden_size, num_classes): super(NeuralNet, self).__init__() self.l1 = nn.Linear(input_size, hidden_size)", "= nn.Linear(hidden_size, hidden_size) self.l4 = nn.Linear(hidden_size, num_classes) self.relu = nn.ReLU()", "self.relu(out) out = self.l2(out) out = self.relu(out) out = self.l3(out)", "def forward(self, x): out = self.l1(x) out = self.relu(out) out", "torch import torch.nn as nn class NeuralNet(nn.Module): def __init__(self, input_size,", "nn class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size, num_classes): super(NeuralNet, self).__init__()", "super(NeuralNet, self).__init__() self.l1 = nn.Linear(input_size, hidden_size) self.l2 = nn.Linear(hidden_size, hidden_size)", "= nn.Linear(hidden_size, num_classes) self.relu = nn.ReLU() def forward(self, x): out", "import torch.nn as nn class NeuralNet(nn.Module): def __init__(self, input_size, hidden_size,", "num_classes) self.relu = nn.ReLU() def forward(self, x): out = self.l1(x)", "= nn.ReLU() def forward(self, x): out = self.l1(x) out =", "input_size, hidden_size, num_classes): super(NeuralNet, 
self).__init__() self.l1 = nn.Linear(input_size, hidden_size) self.l2", "= self.l4(out) # no activation and no softmax at the", "nn.Linear(input_size, hidden_size) self.l2 = nn.Linear(hidden_size, hidden_size) self.l3 = nn.Linear(hidden_size, hidden_size)", "out = self.l3(out) out = self.relu(out) out = self.l4(out) #", "nn.Linear(hidden_size, num_classes) self.relu = nn.ReLU() def forward(self, x): out =", "import torch import torch.nn as nn class NeuralNet(nn.Module): def __init__(self,", "self).__init__() self.l1 = nn.Linear(input_size, hidden_size) self.l2 = nn.Linear(hidden_size, hidden_size) self.l3", "= nn.Linear(hidden_size, hidden_size) self.l3 = nn.Linear(hidden_size, hidden_size) self.l4 = nn.Linear(hidden_size,", "out = self.l4(out) # no activation and no softmax at", "# no activation and no softmax at the end return", "= self.l3(out) out = self.relu(out) out = self.l4(out) # no", "= self.l2(out) out = self.relu(out) out = self.l3(out) out =", "hidden_size) self.l3 = nn.Linear(hidden_size, hidden_size) self.l4 = nn.Linear(hidden_size, num_classes) self.relu", "hidden_size) self.l4 = nn.Linear(hidden_size, num_classes) self.relu = nn.ReLU() def forward(self,", "nn.ReLU() def forward(self, x): out = self.l1(x) out = self.relu(out)", "self.relu(out) out = self.l3(out) out = self.relu(out) out = self.l4(out)", "hidden_size, num_classes): super(NeuralNet, self).__init__() self.l1 = nn.Linear(input_size, hidden_size) self.l2 =", "self.l1 = nn.Linear(input_size, hidden_size) self.l2 = nn.Linear(hidden_size, hidden_size) self.l3 =", "self.l3 = nn.Linear(hidden_size, hidden_size) self.l4 = nn.Linear(hidden_size, num_classes) self.relu =", "= self.l1(x) out = self.relu(out) out = self.l2(out) out =", "self.l2 = nn.Linear(hidden_size, hidden_size) self.l3 = nn.Linear(hidden_size, hidden_size) self.l4 =" ]
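# A minimal usage sketch for NeuralNet above. The sizes are illustrative
# assumptions (784 inputs / 10 classes, an MNIST-style setup). Because forward()
# returns raw logits with no softmax, nn.CrossEntropyLoss -- which applies
# log-softmax internally -- is the natural training criterion.
model = NeuralNet(input_size=784, hidden_size=500, num_classes=10)
criterion = nn.CrossEntropyLoss()

x = torch.randn(32, 784)              # a batch of 32 flattened inputs
labels = torch.randint(0, 10, (32,))  # integer class targets
logits = model(x)                     # shape (32, 10), no softmax applied
loss = criterion(logits, labels)      # softmax handled inside the loss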
[ "file named ``config.json`` placed in the ``utils`` directory. References ----------", "name. The name of the ``log_file`` is a combination of", "os import logging from jwql.logging.logging_functions import configure_logging from jwql.logging.logging_functions import", "<NAME>, 2013 (WFC3 QL Version) Use --- To log the", "Dependencies ------------ The user must have a configuration file named", "The name of the module being logged. production_mode : bool", "open(setup_file_name) as setup: for line in setup: if line[0:8] ==", "if __name__ == '__main__': module = os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function()", "module in module_list] # Log common module version information for", "to working dir. Returns ------- log_file : str The full", "func(*a, **kw) t2_cpu = time.clock() t2_time = time.time() # Log", "line[12:-2] module_list = module_required.split(',') # Clean up the module list", "@log_fail def my_main_function(): pass if __name__ == '__main__': module =", "to decorate. Returns ------- wrapped : func The wrapped function.", "is a combination of the name of the module being", "being logged and the current datetime. Parameters ---------- module :", "output should be written to the production environment. path :", "= [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module in module_list]", "where the log file will be written to. \"\"\" timestamp", "time.time() func(*a, **kw) t2_cpu = time.clock() t2_time = time.time() #", "production_mode: log_file = os.path.join(log_path, module, filename) else: log_file = os.path.join(path,", "@wraps(func) def wrapped(*a, **kw): # Log environment information logging.info('User: '", "list of required modules settings = get_config() setup_file_name = settings['setup_file']", "user must have a configuration file named ``config.json`` placed in", "the output should be written to the production environment. path", "References ---------- This code is adopted and updated from python", "combination of the name of the module being logged and", "the name of the module being logged and the current", "= time.time() func(*a, **kw) t2_cpu = time.clock() t2_time = time.time()", "- <NAME>, 2013 (WFC3 QL Version) Use --- To log", "module in module_list: try: mod = importlib.import_module(module) logging.info(module + '", "Log files are written to the ``logs/`` directory in the", "be written to the production environement. path : str Where", "list module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module", "= [] if user != admin_account and module not in", "time it t1_cpu = time.clock() t1_time = time.time() func(*a, **kw)", "in the ``utils`` directory. References ---------- This code is adopted", "---------- func : func The function to decorate. Returns -------", "import configure_logging from jwql.logging.logging_functions import log_info from jwql.logging.logging_functions import log_fail", ":: import os import logging from jwql.logging.logging_functions import configure_logging from", "def wrapped(*a, **kw): # Log environment information logging.info('User: ' +", "time hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60)", "**kw) t2_cpu = time.clock() t2_time = time.time() # Log execution", "``utils`` directory. 
References ---------- This code is adopted and updated", "logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) set_permissions(log_file) def", "'')) logging.info('Python Executable Path: ' + sys.executable) # Read in", "'').replace(' ', '') for module in module_list] module_list = [module.split('=')[0]", "build list of required modules settings = get_config() setup_file_name =", "to where the log file will be written to. \"\"\"", "if production_mode: log_file = os.path.join(log_path, module, filename) else: log_file =", "t1_cpu = time.clock() t1_time = time.time() func(*a, **kw) t2_cpu =", "as err: logging.warning(err) # Call the function and time it", "pwd import socket import sys import time import traceback from", "module name. The name of the ``log_file`` is a combination", "a combination of the name of the module being logged", "datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp) user = pwd.getpwuid(os.getuid()).pw_name settings =", "to log the execution of modules. Log files are written", "= time.time() # Log execution time hours_cpu, remainder_cpu = divmod(t2_cpu", "by module name and timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- -", "# Determine log file location if production_mode: log_file = make_log_file(module)", "name of the module being logged. production_mode : bool Whether", "format. Parameters ---------- module : str The name of the", "timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp) user = pwd.getpwuid(os.getuid()).pw_name", "module, filename) else: log_file = os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file", "from functools import wraps from jwql.utils.permissions import set_permissions from jwql.utils.utils", "and system information. Future packages we want to track can", "# Log environment information logging.info('User: ' + getpass.getuser()) logging.info('System: '", "functions for the ``jwql`` automation platform. This module provides decorators", "t2_cpu = time.clock() t2_time = time.time() # Log execution time", "log if user-supplied path; default to working dir. \"\"\" #", "= get_config() admin_account = settings['admin_account'] log_path = settings['log_dir'] exempt_modules =", "module = os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function() Dependencies ------------ The user", "hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60) minutes_time,", "' Path: ' + mod.__path__[0]) except ImportError as err: logging.warning(err)", "= time.clock() t1_time = time.time() func(*a, **kw) t2_cpu = time.clock()", "logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu,", "environement. 
path : str Where to write the log if", "if user != admin_account and module not in exempt_modules and", "'') configure_logging(module) my_main_function() Dependencies ------------ The user must have a", "# Call the function and time it t1_cpu = time.clock()", "admin_account = settings['admin_account'] log_path = settings['log_dir'] exempt_modules = [] if", "2013 (WFC3 QL Version) Use --- To log the execution", "``logging_functions.py`` written by Alex Viana, 2013 for the WFC3 Quicklook", "import set_permissions from jwql.utils.utils import get_config, ensure_dir_exists LOG_FILE_LOC = ''", "the ``jwql`` automation platform. This module provides decorators to log", "time.clock() t2_time = time.time() # Log execution time hours_cpu, remainder_cpu", "global LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC = log_file PRODUCTION_BOOL = production_mode", "want to track can be added or removed as necessary.", "Executable Path: ' + sys.executable) # Read in setup.py file", "name based on the module name. The name of the", "= settings['setup_file'] with open(setup_file_name) as setup: for line in setup:", "line[0:8] == \"REQUIRES\": module_required = line[12:-2] module_list = module_required.split(',') #", "on the module name. The name of the ``log_file`` is", "the function func(*a, **kw) logging.info('Completed Successfully') except Exception: logging.critical(traceback.format_exc()) logging.critical('CRASHED')", "permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) set_permissions(log_file)", "current datetime. Parameters ---------- module : str The name of", "wraps from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config, ensure_dir_exists", "of the module being logged. production_mode : bool Whether or", "import os import logging from jwql.logging.logging_functions import configure_logging from jwql.logging.logging_functions", "LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC = log_file PRODUCTION_BOOL = production_mode #", "exempt_modules and production_mode: module = os.path.join('dev', module) if production_mode: log_file", "e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- - <NAME> 2018 - <NAME>, 2013", "from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config, ensure_dir_exists LOG_FILE_LOC", "+ ' Version: ' + mod.__version__) logging.info(module + ' Path:", "a configuration file named ``config.json`` placed in the ``utils`` directory.", "provides decorators to log the execution of modules. Log files", "function. \"\"\" @wraps(func) def wrapped(*a, **kw): # Log environment information", "log_info(func): \"\"\"Decorator to log useful system information. This function can", "log file location if production_mode: log_file = make_log_file(module) else: log_file", "else: log_file = make_log_file(module, production_mode=False, path=path) global LOG_FILE_LOC global PRODUCTION_BOOL", "configure_logging(module) my_main_function() Dependencies ------------ The user must have a configuration", "platform. \"\"\" import datetime import getpass import importlib import logging", "%p', level=logging.INFO) set_permissions(log_file) def make_log_file(module, production_mode=True, path='./'): \"\"\"Create the log", "working dir. 
\"\"\" # Determine log file location if production_mode:", "Call the function and time it t1_cpu = time.clock() t1_time", "logging.info(module + ' Path: ' + mod.__path__[0]) except ImportError as", "datetime import getpass import importlib import logging import os import", "decorators to log the execution of modules. Log files are", "os.path.join(log_path, module, filename) else: log_file = os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return", "import log_fail @log_info @log_fail def my_main_function(): pass if __name__ ==", "divmod(t2_time - t1_time, 60 * 60) minutes_time, seconds_time = divmod(remainder_time,", "importlib import logging import os import pwd import socket import", "system information. This function can be used as a decorator", "The wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw): # Log", "settings['setup_file'] with open(setup_file_name) as setup: for line in setup: if", "= divmod(remainder_cpu, 60) hours_time, remainder_time = divmod(t2_time - t1_time, 60", "file with a standard logging format. Parameters ---------- module :", "storage area, named by module name and timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log``", "if production_mode: log_file = make_log_file(module) else: log_file = make_log_file(module, production_mode=False,", "the output should be written to the production environement. path", "= make_log_file(module) else: log_file = make_log_file(module, production_mode=False, path=path) global LOG_FILE_LOC", "logged. production_mode : bool Whether or not the output should", "module provides decorators to log the execution of modules. Log", "jwql.logging.logging_functions import log_fail @log_info @log_fail def my_main_function(): pass if __name__", "can be used as a decorator to log user environment", "def log_info(func): \"\"\"Decorator to log useful system information. This function", "production environment. path : str Where to write the log", "Authors ------- - <NAME> 2018 - <NAME>, 2013 (WFC3 QL", "2018 - <NAME>, 2013 (WFC3 QL Version) Use --- To", "socket import sys import time import traceback from functools import", "datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) set_permissions(log_file) def make_log_file(module, production_mode=True, path='./'): \"\"\"Create", "func The wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw): try:", "log_file = make_log_file(module) else: log_file = make_log_file(module, production_mode=False, path=path) global", "import socket import sys import time import traceback from functools", "remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60) minutes_cpu, seconds_cpu", "PRODUCTION_BOOL = production_mode # Create the log file and set", "'{0}_{1}.log'.format(module, timestamp) user = pwd.getpwuid(os.getuid()).pw_name settings = get_config() admin_account =", "log file with a standard logging format. Parameters ---------- module", "Read in setup.py file to build list of required modules", "import datetime import getpass import importlib import logging import os", "Log common module version information for module in module_list: try:", "60) logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed CPU Time:", "module name and timestamp, e.g. 
``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- - <NAME>", "return wrapped def log_fail(func): \"\"\"Decorator to log crashes in the", "modules settings = get_config() setup_file_name = settings['setup_file'] with open(setup_file_name) as", "in setup: if line[0:8] == \"REQUIRES\": module_required = line[12:-2] module_list", "and timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- - <NAME> 2018 -", "= divmod(t2_time - t1_time, 60 * 60) minutes_time, seconds_time =", "# Create the log file and set the permissions logging.basicConfig(filename=log_file,", "function. \"\"\" @wraps(func) def wrapped(*a, **kw): try: # Run the", "# Log common module version information for module in module_list:", "a standard logging format. Parameters ---------- module : str The", "This function can be used as a decorator to log", "my_main_function(): pass if __name__ == '__main__': module = os.path.basename(__file__).replace('.py', '')", ": bool Whether or not the output should be written", "if user-supplied path; default to working dir. Returns ------- log_file", "files are written to the ``logs/`` directory in the ``jwql``", "def configure_logging(module, production_mode=True, path='./'): \"\"\"Configure the log file with a", "file location if production_mode: log_file = make_log_file(module) else: log_file =", "import get_config, ensure_dir_exists LOG_FILE_LOC = '' PRODUCTION_BOOL = '' def", "must have a configuration file named ``config.json`` placed in the", "and the current datetime. Parameters ---------- module : str The", "the execution of a module, use: :: import os import", "' + sys.executable) # Read in setup.py file to build", "os import pwd import socket import sys import time import", "to track can be added or removed as necessary. Parameters", "in module_list] module_list = [module.split('=')[0] for module in module_list] #", "func The wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw): #", "try: mod = importlib.import_module(module) logging.info(module + ' Version: ' +", "directory in the ``jwql`` central storage area, named by module", "information. This function can be used as a decorator to", "be written to the production environment. path : str Where", "module : str The name of the module being logged.", "production_mode: log_file = make_log_file(module) else: log_file = make_log_file(module, production_mode=False, path=path)", "This code is adopted and updated from python routine ``logging_functions.py``", "log file and set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s',", "= datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp) user = pwd.getpwuid(os.getuid()).pw_name settings", "production_mode=True, path='./'): \"\"\"Create the log file name based on the", "use: :: import os import logging from jwql.logging.logging_functions import configure_logging", "user-supplied path; default to working dir. \"\"\" # Determine log", "+ mod.__path__[0]) except ImportError as err: logging.warning(err) # Call the", "module being logged. 
production_mode : bool Whether or not the", "+ sys.version.replace('\\n', '')) logging.info('Python Executable Path: ' + sys.executable) #", "the function and time it t1_cpu = time.clock() t1_time =", "def make_log_file(module, production_mode=True, path='./'): \"\"\"Create the log file name based", "setup: if line[0:8] == \"REQUIRES\": module_required = line[12:-2] module_list =", "' + mod.__version__) logging.info(module + ' Path: ' + mod.__path__[0])", "ensure_dir_exists(os.path.dirname(log_file)) return log_file def log_info(func): \"\"\"Decorator to log useful system", "from jwql.logging.logging_functions import log_fail @log_info @log_fail def my_main_function(): pass if", "or removed as necessary. Parameters ---------- func : func The", "function to decorate. Returns ------- wrapped : func The wrapped", "import logging from jwql.logging.logging_functions import configure_logging from jwql.logging.logging_functions import log_info", "%(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) set_permissions(log_file) def make_log_file(module, production_mode=True,", "line in setup: if line[0:8] == \"REQUIRES\": module_required = line[12:-2]", "Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu))", "def my_main_function(): pass if __name__ == '__main__': module = os.path.basename(__file__).replace('.py',", "os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file def log_info(func): \"\"\"Decorator to log", "setup_file_name = settings['setup_file'] with open(setup_file_name) as setup: for line in", "+ sys.executable) # Read in setup.py file to build list", "+ socket.gethostname()) logging.info('Python Version: ' + sys.version.replace('\\n', '')) logging.info('Python Executable", "area, named by module name and timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors", "divmod(t2_cpu - t1_cpu, 60 * 60) minutes_cpu, seconds_cpu = divmod(remainder_cpu,", "!= admin_account and module not in exempt_modules and production_mode: module", "to the ``logs/`` directory in the ``jwql`` central storage area,", "the execution of modules. Log files are written to the", "and updated from python routine ``logging_functions.py`` written by Alex Viana,", "in exempt_modules and production_mode: module = os.path.join('dev', module) if production_mode:", ": func The wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw):", "--- To log the execution of a module, use: ::", "sys import time import traceback from functools import wraps from", "information for module in module_list: try: mod = importlib.import_module(module) logging.info(module", "dir. \"\"\" # Determine log file location if production_mode: log_file", "module_list = module_required.split(',') # Clean up the module list module_list", "logging.info(module + ' Version: ' + mod.__version__) logging.info(module + '", "= '' PRODUCTION_BOOL = '' def configure_logging(module, production_mode=True, path='./'): \"\"\"Configure", "full path to where the log file will be written", "and production_mode: module = os.path.join('dev', module) if production_mode: log_file =", "QL Version) Use --- To log the execution of a", "for the WFC3 Quicklook automation platform. \"\"\" import datetime import", "output should be written to the production environement. path :", "the log file with a standard logging format. 
Parameters ----------", "or not the output should be written to the production", "level=logging.INFO) set_permissions(log_file) def make_log_file(module, production_mode=True, path='./'): \"\"\"Create the log file", "in setup.py file to build list of required modules settings", "+ mod.__version__) logging.info(module + ' Path: ' + mod.__path__[0]) except", "60 * 60) minutes_time, seconds_time = divmod(remainder_time, 60) logging.info('Elapsed Real", "of a module, use: :: import os import logging from", "make_log_file(module, production_mode=False, path=path) global LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC = log_file", "modules. Log files are written to the ``logs/`` directory in", "``jwql`` central storage area, named by module name and timestamp,", "the ``jwql`` central storage area, named by module name and", "module_list: try: mod = importlib.import_module(module) logging.info(module + ' Version: '", "**kw): # Log environment information logging.info('User: ' + getpass.getuser()) logging.info('System:", "str Where to write the log if user-supplied path; default", "will be written to. \"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename =", "module) if production_mode: log_file = os.path.join(log_path, module, filename) else: log_file", "configure_logging(module, production_mode=True, path='./'): \"\"\"Configure the log file with a standard", "{0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return", "named ``config.json`` placed in the ``utils`` directory. References ---------- This", "= importlib.import_module(module) logging.info(module + ' Version: ' + mod.__version__) logging.info(module", "the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO)", "Use --- To log the execution of a module, use:", "' Version: ' + mod.__version__) logging.info(module + ' Path: '", "for the ``jwql`` automation platform. This module provides decorators to", "Alex Viana, 2013 for the WFC3 Quicklook automation platform. \"\"\"", "Log environment information logging.info('User: ' + getpass.getuser()) logging.info('System: ' +", "have a configuration file named ``config.json`` placed in the ``utils``", "should be written to the production environement. path : str", ": str Where to write the log if user-supplied path;", "pwd.getpwuid(os.getuid()).pw_name settings = get_config() admin_account = settings['admin_account'] log_path = settings['log_dir']", "<NAME> 2018 - <NAME>, 2013 (WFC3 QL Version) Use ---", "the log if user-supplied path; default to working dir. \"\"\"", "log_file = make_log_file(module, production_mode=False, path=path) global LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC", "path; default to working dir. Returns ------- log_file : str", "Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return wrapped def log_fail(func): \"\"\"Decorator to", "file and set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y", "the module being logged and the current datetime. Parameters ----------", "The name of the ``log_file`` is a combination of the", "version information for module in module_list: try: mod = importlib.import_module(module)", "decorated code. 
Parameters ---------- func : func The function to", "logging.info('System: ' + socket.gethostname()) logging.info('Python Version: ' + sys.version.replace('\\n', ''))", "\"\"\"Create the log file name based on the module name.", "written to. \"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp)", "= get_config() setup_file_name = settings['setup_file'] with open(setup_file_name) as setup: for", "= [module.split('=')[0] for module in module_list] # Log common module", "__name__ == '__main__': module = os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function() Dependencies", "' + socket.gethostname()) logging.info('Python Version: ' + sys.version.replace('\\n', '')) logging.info('Python", "= os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file def log_info(func): \"\"\"Decorator to", "This module provides decorators to log the execution of modules.", "module version information for module in module_list: try: mod =", "set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p',", "\"\"\"Decorator to log useful system information. This function can be", "for module in module_list: try: mod = importlib.import_module(module) logging.info(module +", "filename) else: log_file = os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file def", "not the output should be written to the production environment.", "\"\"\" Logging functions for the ``jwql`` automation platform. This module", "time.time() # Log execution time hours_cpu, remainder_cpu = divmod(t2_cpu -", "(WFC3 QL Version) Use --- To log the execution of", "and set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S", "logging.info('User: ' + getpass.getuser()) logging.info('System: ' + socket.gethostname()) logging.info('Python Version:", "in the decorated code. Parameters ---------- func : func The", "' + mod.__path__[0]) except ImportError as err: logging.warning(err) # Call", "return log_file def log_info(func): \"\"\"Decorator to log useful system information.", "as setup: for line in setup: if line[0:8] == \"REQUIRES\":", "execution time hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 *", "{0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return wrapped def log_fail(func): \"\"\"Decorator to log", "def log_fail(func): \"\"\"Decorator to log crashes in the decorated code.", "automation platform. \"\"\" import datetime import getpass import importlib import", "central storage area, named by module name and timestamp, e.g.", "* 60) minutes_time, seconds_time = divmod(remainder_time, 60) logging.info('Elapsed Real Time:", "configuration file named ``config.json`` placed in the ``utils`` directory. References", "'__main__': module = os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function() Dependencies ------------ The", "log file will be written to. \"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M')", "make_log_file(module, production_mode=True, path='./'): \"\"\"Create the log file name based on", "------- log_file : str The full path to where the", "path; default to working dir. \"\"\" # Determine log file", "\"\"\"Configure the log file with a standard logging format. 
Parameters", "time import traceback from functools import wraps from jwql.utils.permissions import", "Create the log file and set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s", "log useful system information. This function can be used as", "are written to the ``logs/`` directory in the ``jwql`` central", "importlib.import_module(module) logging.info(module + ' Version: ' + mod.__version__) logging.info(module +", "directory. References ---------- This code is adopted and updated from", "wrapped(*a, **kw): # Log environment information logging.info('User: ' + getpass.getuser())", "The user must have a configuration file named ``config.json`` placed", "in module_list: try: mod = importlib.import_module(module) logging.info(module + ' Version:", "my_main_function() Dependencies ------------ The user must have a configuration file", "\"\"\"Decorator to log crashes in the decorated code. Parameters ----------", "platform. This module provides decorators to log the execution of", "file name based on the module name. The name of", "datetime. Parameters ---------- module : str The name of the", "decorate. Returns ------- wrapped : func The wrapped function. \"\"\"", "', '') for module in module_list] module_list = [module.split('=')[0] for", "\"\"\" @wraps(func) def wrapped(*a, **kw): # Log environment information logging.info('User:", "func : func The function to decorate. Returns ------- wrapped", "ImportError as err: logging.warning(err) # Call the function and time", "production environement. path : str Where to write the log", "to log crashes in the decorated code. Parameters ---------- func", "name of the module being logged and the current datetime.", "= module_required.split(',') # Clean up the module list module_list =", "module, use: :: import os import logging from jwql.logging.logging_functions import", "divmod(remainder_cpu, 60) hours_time, remainder_time = divmod(t2_time - t1_time, 60 *", "set_permissions(log_file) def make_log_file(module, production_mode=True, path='./'): \"\"\"Create the log file name", "log_file = os.path.join(log_path, module, filename) else: log_file = os.path.join(path, filename)", "from jwql.utils.utils import get_config, ensure_dir_exists LOG_FILE_LOC = '' PRODUCTION_BOOL =", "logging.info('Python Executable Path: ' + sys.executable) # Read in setup.py", "logging.info('Python Version: ' + sys.version.replace('\\n', '')) logging.info('Python Executable Path: '", "Version: ' + sys.version.replace('\\n', '')) logging.info('Python Executable Path: ' +", "written to the ``logs/`` directory in the ``jwql`` central storage", "getpass.getuser()) logging.info('System: ' + socket.gethostname()) logging.info('Python Version: ' + sys.version.replace('\\n',", "@log_info @log_fail def my_main_function(): pass if __name__ == '__main__': module", "logging from jwql.logging.logging_functions import configure_logging from jwql.logging.logging_functions import log_info from", "execution of modules. Log files are written to the ``logs/``", "divmod(remainder_time, 60) logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed CPU", "file will be written to. 
\"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename", "seconds_cpu)) return wrapped def log_fail(func): \"\"\"Decorator to log crashes in", "Future packages we want to track can be added or", "minutes_time, seconds_time = divmod(remainder_time, 60) logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time,", "file to build list of required modules settings = get_config()", "t1_time, 60 * 60) minutes_time, seconds_time = divmod(remainder_time, 60) logging.info('Elapsed", "[] if user != admin_account and module not in exempt_modules", "sys.version.replace('\\n', '')) logging.info('Python Executable Path: ' + sys.executable) # Read", "60) minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) hours_time, remainder_time = divmod(t2_time", "= settings['log_dir'] exempt_modules = [] if user != admin_account and", "logged and the current datetime. Parameters ---------- module : str", "\"\"\" import datetime import getpass import importlib import logging import", "= production_mode # Create the log file and set the", "get_config() admin_account = settings['admin_account'] log_path = settings['log_dir'] exempt_modules = []", "func(*a, **kw) logging.info('Completed Successfully') except Exception: logging.critical(traceback.format_exc()) logging.critical('CRASHED') return wrapped", "in module_list] # Log common module version information for module", "t2_time = time.time() # Log execution time hours_cpu, remainder_cpu =", "time.clock() t1_time = time.time() func(*a, **kw) t2_cpu = time.clock() t2_time", "t1_time = time.time() func(*a, **kw) t2_cpu = time.clock() t2_time =", "production_mode : bool Whether or not the output should be", "useful system information. This function can be used as a", "Parameters ---------- func : func The function to decorate. Returns", "== '__main__': module = os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function() Dependencies ------------", "log_file PRODUCTION_BOOL = production_mode # Create the log file and", "module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module in", "minutes_time, seconds_time)) logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return wrapped", "timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- - <NAME> 2018 - <NAME>,", "log user environment and system information. Future packages we want", "jwql.logging.logging_functions import configure_logging from jwql.logging.logging_functions import log_info from jwql.logging.logging_functions import", "Determine log file location if production_mode: log_file = make_log_file(module) else:", "import log_info from jwql.logging.logging_functions import log_fail @log_info @log_fail def my_main_function():", "to the production environement. path : str Where to write", "Version: ' + mod.__version__) logging.info(module + ' Path: ' +", "function can be used as a decorator to log user", "environment information logging.info('User: ' + getpass.getuser()) logging.info('System: ' + socket.gethostname())", "working dir. Returns ------- log_file : str The full path", "system information. 
Future packages we want to track can be", "PRODUCTION_BOOL = '' def configure_logging(module, production_mode=True, path='./'): \"\"\"Configure the log", "To log the execution of a module, use: :: import", "\"REQUIRES\": module_required = line[12:-2] module_list = module_required.split(',') # Clean up", "name of the ``log_file`` is a combination of the name", "# Clean up the module list module_list = [module.replace('\"', '').replace(\"'\",", "logging.warning(err) # Call the function and time it t1_cpu =", "admin_account and module not in exempt_modules and production_mode: module =", "of the module being logged and the current datetime. Parameters", "necessary. Parameters ---------- func : func The function to decorate.", "---------- module : str The name of the module being", "the production environment. path : str Where to write the", "traceback from functools import wraps from jwql.utils.permissions import set_permissions from", "str The name of the module being logged. production_mode :", "user-supplied path; default to working dir. Returns ------- log_file :", "settings['admin_account'] log_path = settings['log_dir'] exempt_modules = [] if user !=", "``logs/`` directory in the ``jwql`` central storage area, named by", "production_mode=False, path=path) global LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC = log_file PRODUCTION_BOOL", "socket.gethostname()) logging.info('Python Version: ' + sys.version.replace('\\n', '')) logging.info('Python Executable Path:", "seconds_cpu = divmod(remainder_cpu, 60) hours_time, remainder_time = divmod(t2_time - t1_time,", "getpass import importlib import logging import os import pwd import", "Parameters ---------- module : str The name of the module", "being logged. production_mode : bool Whether or not the output", "Path: ' + mod.__path__[0]) except ImportError as err: logging.warning(err) #", "it t1_cpu = time.clock() t1_time = time.time() func(*a, **kw) t2_cpu", "seconds_time = divmod(remainder_time, 60) logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time))", "crashes in the decorated code. Parameters ---------- func : func", "can be added or removed as necessary. Parameters ---------- func", "get_config() setup_file_name = settings['setup_file'] with open(setup_file_name) as setup: for line", "for module in module_list] module_list = [module.split('=')[0] for module in", "python routine ``logging_functions.py`` written by Alex Viana, 2013 for the", "written to the production environement. path : str Where to", "removed as necessary. Parameters ---------- func : func The function", "try: # Run the function func(*a, **kw) logging.info('Completed Successfully') except", "= os.path.join(log_path, module, filename) else: log_file = os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file))", "``jwql`` automation platform. This module provides decorators to log the", "the log file name based on the module name. The", "in the ``jwql`` central storage area, named by module name", "routine ``logging_functions.py`` written by Alex Viana, 2013 for the WFC3", "str The full path to where the log file will", "decorator to log user environment and system information. Future packages", "log if user-supplied path; default to working dir. Returns -------", "+ getpass.getuser()) logging.info('System: ' + socket.gethostname()) logging.info('Python Version: ' +", "log crashes in the decorated code. 
Parameters ---------- func :", "get_config, ensure_dir_exists LOG_FILE_LOC = '' PRODUCTION_BOOL = '' def configure_logging(module,", "if line[0:8] == \"REQUIRES\": module_required = line[12:-2] module_list = module_required.split(',')", "the production environement. path : str Where to write the", "log_fail(func): \"\"\"Decorator to log crashes in the decorated code. Parameters", "is adopted and updated from python routine ``logging_functions.py`` written by", "a module, use: :: import os import logging from jwql.logging.logging_functions", "for line in setup: if line[0:8] == \"REQUIRES\": module_required =", "if user-supplied path; default to working dir. \"\"\" # Determine", "import getpass import importlib import logging import os import pwd", "code is adopted and updated from python routine ``logging_functions.py`` written", "the module list module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '')", "the decorated code. Parameters ---------- func : func The function", "Where to write the log if user-supplied path; default to", "added or removed as necessary. Parameters ---------- func : func", "functools import wraps from jwql.utils.permissions import set_permissions from jwql.utils.utils import", "placed in the ``utils`` directory. References ---------- This code is", "[module.split('=')[0] for module in module_list] # Log common module version", "* 60) minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) hours_time, remainder_time =", "Version) Use --- To log the execution of a module,", "Returns ------- log_file : str The full path to where", "The function to decorate. Returns ------- wrapped : func The", "filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file def log_info(func): \"\"\"Decorator to log useful", "seconds_time)) logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return wrapped def", "production_mode=True, path='./'): \"\"\"Configure the log file with a standard logging", "= settings['admin_account'] log_path = settings['log_dir'] exempt_modules = [] if user", "module in module_list] module_list = [module.split('=')[0] for module in module_list]", "of modules. Log files are written to the ``logs/`` directory", "function func(*a, **kw) logging.info('Completed Successfully') except Exception: logging.critical(traceback.format_exc()) logging.critical('CRASHED') return", "minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) hours_time, remainder_time = divmod(t2_time -", "%(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) set_permissions(log_file) def make_log_file(module, production_mode=True, path='./'):", "from jwql.logging.logging_functions import log_info from jwql.logging.logging_functions import log_fail @log_info @log_fail", "# Read in setup.py file to build list of required", "Quicklook automation platform. \"\"\" import datetime import getpass import importlib", "wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw): # Log environment", "jwql.utils.utils import get_config, ensure_dir_exists LOG_FILE_LOC = '' PRODUCTION_BOOL = ''", "The full path to where the log file will be", "path : str Where to write the log if user-supplied", "execution of a module, use: :: import os import logging", "+ ' Path: ' + mod.__path__[0]) except ImportError as err:", "common module version information for module in module_list: try: mod", "a decorator to log user environment and system information. 
Future", "module_list] module_list = [module.split('=')[0] for module in module_list] # Log", "logging import os import pwd import socket import sys import", "os.path.join('dev', module) if production_mode: log_file = os.path.join(log_path, module, filename) else:", "CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return wrapped def log_fail(func): \"\"\"Decorator", "and time it t1_cpu = time.clock() t1_time = time.time() func(*a,", "we want to track can be added or removed as", "settings = get_config() setup_file_name = settings['setup_file'] with open(setup_file_name) as setup:", "log_fail @log_info @log_fail def my_main_function(): pass if __name__ == '__main__':", "The wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw): try: #", "set_permissions from jwql.utils.utils import get_config, ensure_dir_exists LOG_FILE_LOC = '' PRODUCTION_BOOL", "automation platform. This module provides decorators to log the execution", "60) hours_time, remainder_time = divmod(t2_time - t1_time, 60 * 60)", "= '' def configure_logging(module, production_mode=True, path='./'): \"\"\"Configure the log file", "named by module name and timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors -------", "jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config, ensure_dir_exists LOG_FILE_LOC =", "= divmod(remainder_time, 60) logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed", "module = os.path.join('dev', module) if production_mode: log_file = os.path.join(log_path, module,", "import traceback from functools import wraps from jwql.utils.permissions import set_permissions", "production_mode # Create the log file and set the permissions", "for module in module_list] # Log common module version information", "to. \"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp) user", "the module name. The name of the ``log_file`` is a", "the WFC3 Quicklook automation platform. \"\"\" import datetime import getpass", "path=path) global LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC = log_file PRODUCTION_BOOL =", "log_file : str The full path to where the log", "wrapped function. \"\"\" @wraps(func) def wrapped(*a, **kw): try: # Run", "import sys import time import traceback from functools import wraps", "by Alex Viana, 2013 for the WFC3 Quicklook automation platform.", "settings = get_config() admin_account = settings['admin_account'] log_path = settings['log_dir'] exempt_modules", "standard logging format. Parameters ---------- module : str The name", "PRODUCTION_BOOL LOG_FILE_LOC = log_file PRODUCTION_BOOL = production_mode # Create the", "the module being logged. 
production_mode : bool Whether or not", "= line[12:-2] module_list = module_required.split(',') # Clean up the module", "**kw): try: # Run the function func(*a, **kw) logging.info('Completed Successfully')", "of the ``log_file`` is a combination of the name of", "' + getpass.getuser()) logging.info('System: ' + socket.gethostname()) logging.info('Python Version: '", "minutes_cpu, seconds_cpu)) return wrapped def log_fail(func): \"\"\"Decorator to log crashes", "global PRODUCTION_BOOL LOG_FILE_LOC = log_file PRODUCTION_BOOL = production_mode # Create", "Clean up the module list module_list = [module.replace('\"', '').replace(\"'\", '').replace('", "remainder_time = divmod(t2_time - t1_time, 60 * 60) minutes_time, seconds_time", "dir. Returns ------- log_file : str The full path to", "------------ The user must have a configuration file named ``config.json``", "the ``log_file`` is a combination of the name of the", "import time import traceback from functools import wraps from jwql.utils.permissions", "path to where the log file will be written to.", "as a decorator to log user environment and system information.", "used as a decorator to log user environment and system", "------- wrapped : func The wrapped function. \"\"\" @wraps(func) def", "import importlib import logging import os import pwd import socket", "= log_file PRODUCTION_BOOL = production_mode # Create the log file", "err: logging.warning(err) # Call the function and time it t1_cpu", "production_mode: module = os.path.join('dev', module) if production_mode: log_file = os.path.join(log_path,", "---------- This code is adopted and updated from python routine", "user environment and system information. Future packages we want to", "not in exempt_modules and production_mode: module = os.path.join('dev', module) if", "the log file and set the permissions logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s:", "function and time it t1_cpu = time.clock() t1_time = time.time()", "\"\"\" @wraps(func) def wrapped(*a, **kw): try: # Run the function", "[module.replace('\"', '').replace(\"'\", '').replace(' ', '') for module in module_list] module_list", "from jwql.logging.logging_functions import configure_logging from jwql.logging.logging_functions import log_info from jwql.logging.logging_functions", "t1_cpu, 60 * 60) minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) hours_time,", "to write the log if user-supplied path; default to working", "configure_logging from jwql.logging.logging_functions import log_info from jwql.logging.logging_functions import log_fail @log_info", "'' PRODUCTION_BOOL = '' def configure_logging(module, production_mode=True, path='./'): \"\"\"Configure the", "the log file will be written to. \"\"\" timestamp =", ": func The function to decorate. Returns ------- wrapped :", "logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu, seconds_cpu)) return wrapped def log_fail(func):", "user != admin_account and module not in exempt_modules and production_mode:", "os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function() Dependencies ------------ The user must have", "import wraps from jwql.utils.permissions import set_permissions from jwql.utils.utils import get_config,", "= time.clock() t2_time = time.time() # Log execution time hours_cpu,", "# Run the function func(*a, **kw) logging.info('Completed Successfully') except Exception:", "module being logged and the current datetime. 
Parameters ---------- module", "log the execution of modules. Log files are written to", "Viana, 2013 for the WFC3 Quicklook automation platform. \"\"\" import", "bool Whether or not the output should be written to", "based on the module name. The name of the ``log_file``", "= divmod(t2_cpu - t1_cpu, 60 * 60) minutes_cpu, seconds_cpu =", "mod = importlib.import_module(module) logging.info(module + ' Version: ' + mod.__version__)", "module_required.split(',') # Clean up the module list module_list = [module.replace('\"',", "2013 for the WFC3 Quicklook automation platform. \"\"\" import datetime", "'').replace(\"'\", '').replace(' ', '') for module in module_list] module_list =", "``config.json`` placed in the ``utils`` directory. References ---------- This code", "updated from python routine ``logging_functions.py`` written by Alex Viana, 2013", ": str The name of the module being logged. production_mode", "wrapped(*a, **kw): try: # Run the function func(*a, **kw) logging.info('Completed", "user = pwd.getpwuid(os.getuid()).pw_name settings = get_config() admin_account = settings['admin_account'] log_path", "from python routine ``logging_functions.py`` written by Alex Viana, 2013 for", "LOG_FILE_LOC = '' PRODUCTION_BOOL = '' def configure_logging(module, production_mode=True, path='./'):", "as necessary. Parameters ---------- func : func The function to", "to log useful system information. This function can be used", "log_file = os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file def log_info(func): \"\"\"Decorator", "the current datetime. Parameters ---------- module : str The name", "module list module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ', '') for", "be written to. \"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module,", "log file name based on the module name. The name", "path='./'): \"\"\"Configure the log file with a standard logging format.", "mod.__path__[0]) except ImportError as err: logging.warning(err) # Call the function", "import logging import os import pwd import socket import sys", "= make_log_file(module, production_mode=False, path=path) global LOG_FILE_LOC global PRODUCTION_BOOL LOG_FILE_LOC =", "== \"REQUIRES\": module_required = line[12:-2] module_list = module_required.split(',') # Clean", "Logging functions for the ``jwql`` automation platform. This module provides", "the log if user-supplied path; default to working dir. Returns", "log the execution of a module, use: :: import os", "pass if __name__ == '__main__': module = os.path.basename(__file__).replace('.py', '') configure_logging(module)", "packages we want to track can be added or removed", "timestamp) user = pwd.getpwuid(os.getuid()).pw_name settings = get_config() admin_account = settings['admin_account']", "log_file def log_info(func): \"\"\"Decorator to log useful system information. This", "'' def configure_logging(module, production_mode=True, path='./'): \"\"\"Configure the log file with", "with a standard logging format. Parameters ---------- module : str", "of the name of the module being logged and the", "Returns ------- wrapped : func The wrapped function. \"\"\" @wraps(func)", "'') for module in module_list] module_list = [module.split('=')[0] for module", "up the module list module_list = [module.replace('\"', '').replace(\"'\", '').replace(' ',", "default to working dir. 
\"\"\" # Determine log file location", "the ``logs/`` directory in the ``jwql`` central storage area, named", "should be written to the production environment. path : str", "mod.__version__) logging.info(module + ' Path: ' + mod.__path__[0]) except ImportError", "Log execution time hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60", "' + sys.version.replace('\\n', '')) logging.info('Python Executable Path: ' + sys.executable)", "= os.path.basename(__file__).replace('.py', '') configure_logging(module) my_main_function() Dependencies ------------ The user must", "WFC3 Quicklook automation platform. \"\"\" import datetime import getpass import", "sys.executable) # Read in setup.py file to build list of", "``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- - <NAME> 2018 - <NAME>, 2013 (WFC3", "module_list] # Log common module version information for module in", "location if production_mode: log_file = make_log_file(module) else: log_file = make_log_file(module,", "information. Future packages we want to track can be added", "and module not in exempt_modules and production_mode: module = os.path.join('dev',", "filename = '{0}_{1}.log'.format(module, timestamp) user = pwd.getpwuid(os.getuid()).pw_name settings = get_config()", "format='%(asctime)s %(levelname)s: %(message)s', datefmt='%m/%d/%Y %H:%M:%S %p', level=logging.INFO) set_permissions(log_file) def make_log_file(module,", "Whether or not the output should be written to the", "default to working dir. Returns ------- log_file : str The", "jwql.logging.logging_functions import log_info from jwql.logging.logging_functions import log_fail @log_info @log_fail def", "logging format. Parameters ---------- module : str The name of", "write the log if user-supplied path; default to working dir.", "------- - <NAME> 2018 - <NAME>, 2013 (WFC3 QL Version)", "- <NAME> 2018 - <NAME>, 2013 (WFC3 QL Version) Use", "wrapped : func The wrapped function. \"\"\" @wraps(func) def wrapped(*a,", "to the production environment. path : str Where to write", "LOG_FILE_LOC = log_file PRODUCTION_BOOL = production_mode # Create the log", "written by Alex Viana, 2013 for the WFC3 Quicklook automation", "environment. path : str Where to write the log if", "import pwd import socket import sys import time import traceback", "\"\"\" timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') filename = '{0}_{1}.log'.format(module, timestamp) user =", "- t1_time, 60 * 60) minutes_time, seconds_time = divmod(remainder_time, 60)", "be used as a decorator to log user environment and", "be added or removed as necessary. 
Parameters ---------- func :", "Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time, minutes_time, seconds_time)) logging.info('Elapsed CPU Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_cpu, minutes_cpu,", "# Log execution time hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu,", "60) minutes_time, seconds_time = divmod(remainder_time, 60) logging.info('Elapsed Real Time: {0:.0f}:{1:.0f}:{2:f}'.format(hours_time,", "required modules settings = get_config() setup_file_name = settings['setup_file'] with open(setup_file_name)", "@wraps(func) def wrapped(*a, **kw): try: # Run the function func(*a,", "not the output should be written to the production environement.", "= os.path.join('dev', module) if production_mode: log_file = os.path.join(log_path, module, filename)", "information logging.info('User: ' + getpass.getuser()) logging.info('System: ' + socket.gethostname()) logging.info('Python", "make_log_file(module) else: log_file = make_log_file(module, production_mode=False, path=path) global LOG_FILE_LOC global", "log_info from jwql.logging.logging_functions import log_fail @log_info @log_fail def my_main_function(): pass", "hours_cpu, remainder_cpu = divmod(t2_cpu - t1_cpu, 60 * 60) minutes_cpu,", "Path: ' + sys.executable) # Read in setup.py file to", "track can be added or removed as necessary. Parameters ----------", "adopted and updated from python routine ``logging_functions.py`` written by Alex", "else: log_file = os.path.join(path, filename) ensure_dir_exists(os.path.dirname(log_file)) return log_file def log_info(func):", "exempt_modules = [] if user != admin_account and module not", "%H:%M:%S %p', level=logging.INFO) set_permissions(log_file) def make_log_file(module, production_mode=True, path='./'): \"\"\"Create the", "of required modules settings = get_config() setup_file_name = settings['setup_file'] with", "except ImportError as err: logging.warning(err) # Call the function and", "- t1_cpu, 60 * 60) minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60)", "Run the function func(*a, **kw) logging.info('Completed Successfully') except Exception: logging.critical(traceback.format_exc())", "60 * 60) minutes_cpu, seconds_cpu = divmod(remainder_cpu, 60) hours_time, remainder_time", "settings['log_dir'] exempt_modules = [] if user != admin_account and module", "\"\"\" # Determine log file location if production_mode: log_file =", "the ``utils`` directory. References ---------- This code is adopted and", "written to the production environment. path : str Where to", "to log user environment and system information. Future packages we", "with open(setup_file_name) as setup: for line in setup: if line[0:8]", "module_required = line[12:-2] module_list = module_required.split(',') # Clean up the", "def wrapped(*a, **kw): try: # Run the function func(*a, **kw)", "wrapped def log_fail(func): \"\"\"Decorator to log crashes in the decorated", "path='./'): \"\"\"Create the log file name based on the module", "``log_file`` is a combination of the name of the module", "setup: for line in setup: if line[0:8] == \"REQUIRES\": module_required", ": str The full path to where the log file", "module not in exempt_modules and production_mode: module = os.path.join('dev', module)", "to build list of required modules settings = get_config() setup_file_name", "log_path = settings['log_dir'] exempt_modules = [] if user != admin_account", "to working dir. 
\"\"\" # Determine log file location if", "ensure_dir_exists LOG_FILE_LOC = '' PRODUCTION_BOOL = '' def configure_logging(module, production_mode=True,", "name and timestamp, e.g. ``monitor_filesystem/monitor_filesystem_2018-06-20-15:22:51.log`` Authors ------- - <NAME> 2018", "module_list = [module.split('=')[0] for module in module_list] # Log common", "= '{0}_{1}.log'.format(module, timestamp) user = pwd.getpwuid(os.getuid()).pw_name settings = get_config() admin_account", "setup.py file to build list of required modules settings =", "= pwd.getpwuid(os.getuid()).pw_name settings = get_config() admin_account = settings['admin_account'] log_path =", "func The function to decorate. Returns ------- wrapped : func", "environment and system information. Future packages we want to track", "import os import pwd import socket import sys import time", "code. Parameters ---------- func : func The function to decorate." ]
[ "typing import Optional session: Optional[ClientSession] = None __all__ = (session,)", "from typing import Optional session: Optional[ClientSession] = None __all__ =", "from aiohttp import ClientSession from typing import Optional session: Optional[ClientSession]", "aiohttp import ClientSession from typing import Optional session: Optional[ClientSession] =", "import ClientSession from typing import Optional session: Optional[ClientSession] = None", "ClientSession from typing import Optional session: Optional[ClientSession] = None __all__" ]
[ "_md5.hexdigest() def sha1(path): _sha1 = hashlib.sha1() fh = open(path, 'rb')", "= open(path, 'rb') if start > 0: fh.seek(start) if stop", "os.path.getsize(path) pos = start while pos < stop: size =", "= start while pos < stop: size = min(CHUNK, stop", "break _sha256.update(chunk) fh.close() return _sha256.hexdigest() def sha384(path): _sha384 = hashlib.sha384()", "= fh.read(size) if not chunk: break pos += len(chunk) _md5.update(chunk)", "& 0xFFFFFFFF) def md5(path, start=0, stop=-1): _md5 = hashlib.md5() fh", "fh.close() return _sha384.hexdigest() def sha512(path): _sha512 = hashlib.sha512() fh =", "GPLv3 license that can be found # in http://www.gnu.org/licenses/gpl-3.0.html import", "fh.read(CHUNK) if not chunk: break _sha256.update(chunk) fh.close() return _sha256.hexdigest() def", "if not chunk: break _sha384.update(chunk) fh.close() return _sha384.hexdigest() def sha512(path):", "_sha224.update(chunk) fh.close() return _sha224.hexdigest() def sha256(path): _sha256 = hashlib.sha256() fh", "_sha384 = hashlib.sha384() fh = open(path, 'rb') while True: chunk", "return _sha384.hexdigest() def sha512(path): _sha512 = hashlib.sha512() fh = open(path,", "chunk = fh.read(CHUNK) if not chunk: break _sha256.update(chunk) fh.close() return", "def sha512(path): _sha512 = hashlib.sha512() fh = open(path, 'rb') while", "if not chunk: break _sha256.update(chunk) fh.close() return _sha256.hexdigest() def sha384(path):", "chunk: break pos += len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest() def", "stop == -1: stop = os.path.getsize(path) pos = start while", "hashlib.sha256() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK)", "while True: chunk = fh.read(CHUNK) if not chunk: break _sha384.update(chunk)", "0: fh.seek(start) if stop == -1: stop = os.path.getsize(path) pos", "chunk = fh.read(CHUNK) if not chunk: break _crc = zlib.crc32(chunk,", "start > 0: fh.seek(start) if stop == -1: stop =", "= hashlib.sha224() fh = open(path, 'rb') while True: chunk =", "_sha384.update(chunk) fh.close() return _sha384.hexdigest() def sha512(path): _sha512 = hashlib.sha512() fh", "= hashlib.sha256() fh = open(path, 'rb') while True: chunk =", "start while pos < stop: size = min(CHUNK, stop -", "= fh.read(CHUNK) if not chunk: break _sha384.update(chunk) fh.close() return _sha384.hexdigest()", "= fh.read(CHUNK) if not chunk: break _crc = zlib.crc32(chunk, _crc)", "crc(path): _crc = 0 fh = open(path, 'rb') while True:", "LiuLang <<EMAIL>> # Use of this source code is governed", "break _sha384.update(chunk) fh.close() return _sha384.hexdigest() def sha512(path): _sha512 = hashlib.sha512()", "fh.close() return _sha256.hexdigest() def sha384(path): _sha384 = hashlib.sha384() fh =", "stop - pos) chunk = fh.read(size) if not chunk: break", "# Use of this source code is governed by GPLv3", "def md5(path, start=0, stop=-1): _md5 = hashlib.md5() fh = open(path,", "= os.path.getsize(path) pos = start while pos < stop: size", "if not chunk: break _sha224.update(chunk) fh.close() return _sha224.hexdigest() def sha256(path):", "2 ** 20 def crc(path): _crc = 0 fh =", "start=0, stop=-1): _md5 = hashlib.md5() fh = open(path, 'rb') if", "_sha256.hexdigest() def sha384(path): _sha384 = hashlib.sha384() fh = open(path, 'rb')", "_sha224.hexdigest() def sha256(path): _sha256 = hashlib.sha256() fh = open(path, 'rb')", "hashlib.sha512() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK)", "hashlib.sha384() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK)", "zlib CHUNK = 2 ** 20 def crc(path): _crc =", 
"return _sha224.hexdigest() def sha256(path): _sha256 = hashlib.sha256() fh = open(path,", "= fh.read(CHUNK) if not chunk: break _sha224.update(chunk) fh.close() return _sha224.hexdigest()", "_crc) fh.close() return '%X' % (_crc & 0xFFFFFFFF) def md5(path,", "= min(CHUNK, stop - pos) chunk = fh.read(size) if not", "while True: chunk = fh.read(CHUNK) if not chunk: break _crc", "open(path, 'rb') while True: chunk = fh.read(CHUNK) if not chunk:", "return _md5.hexdigest() def sha1(path): _sha1 = hashlib.sha1() fh = open(path,", "sha384(path): _sha384 = hashlib.sha384() fh = open(path, 'rb') while True:", "break _sha1.update(chunk) fh.close() return _sha1.hexdigest() def sha224(path): _sha224 = hashlib.sha224()", "not chunk: break _crc = zlib.crc32(chunk, _crc) fh.close() return '%X'", "import zlib CHUNK = 2 ** 20 def crc(path): _crc", "not chunk: break _sha1.update(chunk) fh.close() return _sha1.hexdigest() def sha224(path): _sha224", "chunk = fh.read(CHUNK) if not chunk: break _sha1.update(chunk) fh.close() return", "20 def crc(path): _crc = 0 fh = open(path, 'rb')", "not chunk: break _sha224.update(chunk) fh.close() return _sha224.hexdigest() def sha256(path): _sha256", "(_crc & 0xFFFFFFFF) def md5(path, start=0, stop=-1): _md5 = hashlib.md5()", "= 0 fh = open(path, 'rb') while True: chunk =", "Use of this source code is governed by GPLv3 license", "def sha1(path): _sha1 = hashlib.sha1() fh = open(path, 'rb') while", "sha1(path): _sha1 = hashlib.sha1() fh = open(path, 'rb') while True:", "chunk = fh.read(CHUNK) if not chunk: break _sha224.update(chunk) fh.close() return", "# in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os import zlib CHUNK", "len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest() def sha1(path): _sha1 = hashlib.sha1()", "True: chunk = fh.read(CHUNK) if not chunk: break _sha384.update(chunk) fh.close()", "fh.close() return _sha1.hexdigest() def sha224(path): _sha224 = hashlib.sha224() fh =", "def sha224(path): _sha224 = hashlib.sha224() fh = open(path, 'rb') while", "not chunk: break _sha256.update(chunk) fh.close() return _sha256.hexdigest() def sha384(path): _sha384", "_crc = 0 fh = open(path, 'rb') while True: chunk", "os import zlib CHUNK = 2 ** 20 def crc(path):", "_sha1.update(chunk) fh.close() return _sha1.hexdigest() def sha224(path): _sha224 = hashlib.sha224() fh", "pos) chunk = fh.read(size) if not chunk: break pos +=", "fh.seek(start) if stop == -1: stop = os.path.getsize(path) pos =", "= fh.read(CHUNK) if not chunk: break _sha1.update(chunk) fh.close() return _sha1.hexdigest()", "while pos < stop: size = min(CHUNK, stop - pos)", "def sha256(path): _sha256 = hashlib.sha256() fh = open(path, 'rb') while", "chunk: break _sha1.update(chunk) fh.close() return _sha1.hexdigest() def sha224(path): _sha224 =", "pos += len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest() def sha1(path): _sha1", "fh.close() return '%X' % (_crc & 0xFFFFFFFF) def md5(path, start=0,", "while True: chunk = fh.read(CHUNK) if not chunk: break _sha1.update(chunk)", "chunk = fh.read(CHUNK) if not chunk: break _sha384.update(chunk) fh.close() return", "= zlib.crc32(chunk, _crc) fh.close() return '%X' % (_crc & 0xFFFFFFFF)", "<<EMAIL>> # Use of this source code is governed by", "return '%X' % (_crc & 0xFFFFFFFF) def md5(path, start=0, stop=-1):", "break pos += len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest() def sha1(path):", "source code is governed by GPLv3 license that can be", "True: chunk = fh.read(CHUNK) if not chunk: break 
_sha1.update(chunk) fh.close()", "sha512(path): _sha512 = hashlib.sha512() fh = open(path, 'rb') while True:", "-1: stop = os.path.getsize(path) pos = start while pos <", "chunk = fh.read(size) if not chunk: break pos += len(chunk)", "sha224(path): _sha224 = hashlib.sha224() fh = open(path, 'rb') while True:", "if not chunk: break _sha1.update(chunk) fh.close() return _sha1.hexdigest() def sha224(path):", "chunk: break _crc = zlib.crc32(chunk, _crc) fh.close() return '%X' %", "_sha224 = hashlib.sha224() fh = open(path, 'rb') while True: chunk", "break _sha224.update(chunk) fh.close() return _sha224.hexdigest() def sha256(path): _sha256 = hashlib.sha256()", "# Copyright (C) 2014-2015 LiuLang <<EMAIL>> # Use of this", "= fh.read(CHUNK) if not chunk: break _sha512.update(chunk) fh.close() return _sha512.hexdigest()", "def crc(path): _crc = 0 fh = open(path, 'rb') while", "pos = start while pos < stop: size = min(CHUNK,", "stop: size = min(CHUNK, stop - pos) chunk = fh.read(size)", "_sha384.hexdigest() def sha512(path): _sha512 = hashlib.sha512() fh = open(path, 'rb')", "'%X' % (_crc & 0xFFFFFFFF) def md5(path, start=0, stop=-1): _md5", "_md5.update(chunk) fh.close() return _md5.hexdigest() def sha1(path): _sha1 = hashlib.sha1() fh", "while True: chunk = fh.read(CHUNK) if not chunk: break _sha512.update(chunk)", "= 2 ** 20 def crc(path): _crc = 0 fh", "= fh.read(CHUNK) if not chunk: break _sha256.update(chunk) fh.close() return _sha256.hexdigest()", "is governed by GPLv3 license that can be found #", "fh.read(CHUNK) if not chunk: break _crc = zlib.crc32(chunk, _crc) fh.close()", "fh.read(CHUNK) if not chunk: break _sha1.update(chunk) fh.close() return _sha1.hexdigest() def", "% (_crc & 0xFFFFFFFF) def md5(path, start=0, stop=-1): _md5 =", "== -1: stop = os.path.getsize(path) pos = start while pos", "_sha1.hexdigest() def sha224(path): _sha224 = hashlib.sha224() fh = open(path, 'rb')", "while True: chunk = fh.read(CHUNK) if not chunk: break _sha256.update(chunk)", "be found # in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os import", "in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os import zlib CHUNK =", "md5(path, start=0, stop=-1): _md5 = hashlib.md5() fh = open(path, 'rb')", "this source code is governed by GPLv3 license that can", "open(path, 'rb') if start > 0: fh.seek(start) if stop ==", "'rb') while True: chunk = fh.read(CHUNK) if not chunk: break", "import hashlib import os import zlib CHUNK = 2 **", "CHUNK = 2 ** 20 def crc(path): _crc = 0", "_sha256.update(chunk) fh.close() return _sha256.hexdigest() def sha384(path): _sha384 = hashlib.sha384() fh", "if not chunk: break _crc = zlib.crc32(chunk, _crc) fh.close() return", "break _crc = zlib.crc32(chunk, _crc) fh.close() return '%X' % (_crc", "+= len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest() def sha1(path): _sha1 =", "return _sha1.hexdigest() def sha224(path): _sha224 = hashlib.sha224() fh = open(path,", "return _sha256.hexdigest() def sha384(path): _sha384 = hashlib.sha384() fh = open(path,", "= open(path, 'rb') while True: chunk = fh.read(CHUNK) if not", "that can be found # in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import", "hashlib.md5() fh = open(path, 'rb') if start > 0: fh.seek(start)", "code is governed by GPLv3 license that can be found", "chunk: break _sha224.update(chunk) fh.close() return _sha224.hexdigest() def sha256(path): _sha256 =", "True: chunk = fh.read(CHUNK) if not chunk: break _sha224.update(chunk) fh.close()", "= hashlib.sha384() fh = 
open(path, 'rb') while True: chunk =", "chunk: break _sha384.update(chunk) fh.close() return _sha384.hexdigest() def sha512(path): _sha512 =", "stop = os.path.getsize(path) pos = start while pos < stop:", "chunk = fh.read(CHUNK) if not chunk: break _sha512.update(chunk) fh.close() return", "_sha256 = hashlib.sha256() fh = open(path, 'rb') while True: chunk", "hashlib import os import zlib CHUNK = 2 ** 20", "sha256(path): _sha256 = hashlib.sha256() fh = open(path, 'rb') while True:", "= hashlib.sha1() fh = open(path, 'rb') while True: chunk =", "True: chunk = fh.read(CHUNK) if not chunk: break _sha256.update(chunk) fh.close()", "True: chunk = fh.read(CHUNK) if not chunk: break _sha512.update(chunk) fh.close()", "= hashlib.md5() fh = open(path, 'rb') if start > 0:", "governed by GPLv3 license that can be found # in", "if stop == -1: stop = os.path.getsize(path) pos = start", "can be found # in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os", "by GPLv3 license that can be found # in http://www.gnu.org/licenses/gpl-3.0.html", "not chunk: break _sha384.update(chunk) fh.close() return _sha384.hexdigest() def sha512(path): _sha512", "_md5 = hashlib.md5() fh = open(path, 'rb') if start >", "fh.close() return _md5.hexdigest() def sha1(path): _sha1 = hashlib.sha1() fh =", "fh.read(size) if not chunk: break pos += len(chunk) _md5.update(chunk) fh.close()", "while True: chunk = fh.read(CHUNK) if not chunk: break _sha224.update(chunk)", "(C) 2014-2015 LiuLang <<EMAIL>> # Use of this source code", "'rb') if start > 0: fh.seek(start) if stop == -1:", "Copyright (C) 2014-2015 LiuLang <<EMAIL>> # Use of this source", "- pos) chunk = fh.read(size) if not chunk: break pos", "True: chunk = fh.read(CHUNK) if not chunk: break _crc =", "hashlib.sha1() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK)", "< stop: size = min(CHUNK, stop - pos) chunk =", "zlib.crc32(chunk, _crc) fh.close() return '%X' % (_crc & 0xFFFFFFFF) def", "import os import zlib CHUNK = 2 ** 20 def", "fh.close() return _sha224.hexdigest() def sha256(path): _sha256 = hashlib.sha256() fh =", "0 fh = open(path, 'rb') while True: chunk = fh.read(CHUNK)", "chunk: break _sha256.update(chunk) fh.close() return _sha256.hexdigest() def sha384(path): _sha384 =", "_crc = zlib.crc32(chunk, _crc) fh.close() return '%X' % (_crc &", "2014-2015 LiuLang <<EMAIL>> # Use of this source code is", "found # in http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os import zlib", "not chunk: break pos += len(chunk) _md5.update(chunk) fh.close() return _md5.hexdigest()", "_sha1 = hashlib.sha1() fh = open(path, 'rb') while True: chunk", "size = min(CHUNK, stop - pos) chunk = fh.read(size) if", "pos < stop: size = min(CHUNK, stop - pos) chunk", "min(CHUNK, stop - pos) chunk = fh.read(size) if not chunk:", "_sha512 = hashlib.sha512() fh = open(path, 'rb') while True: chunk", "** 20 def crc(path): _crc = 0 fh = open(path,", "stop=-1): _md5 = hashlib.md5() fh = open(path, 'rb') if start", "http://www.gnu.org/licenses/gpl-3.0.html import hashlib import os import zlib CHUNK = 2", "fh = open(path, 'rb') while True: chunk = fh.read(CHUNK) if", "hashlib.sha224() fh = open(path, 'rb') while True: chunk = fh.read(CHUNK)", "= hashlib.sha512() fh = open(path, 'rb') while True: chunk =", "<gh_stars>0 # Copyright (C) 2014-2015 LiuLang <<EMAIL>> # Use of", "0xFFFFFFFF) def md5(path, start=0, stop=-1): _md5 = hashlib.md5() fh =", "of this source code is governed by GPLv3 license that", "fh.read(CHUNK) if not chunk: break _sha384.update(chunk) 
fh.close() return _sha384.hexdigest() def", "fh = open(path, 'rb') if start > 0: fh.seek(start) if", "> 0: fh.seek(start) if stop == -1: stop = os.path.getsize(path)", "if start > 0: fh.seek(start) if stop == -1: stop", "if not chunk: break pos += len(chunk) _md5.update(chunk) fh.close() return", "license that can be found # in http://www.gnu.org/licenses/gpl-3.0.html import hashlib", "fh.read(CHUNK) if not chunk: break _sha224.update(chunk) fh.close() return _sha224.hexdigest() def", "def sha384(path): _sha384 = hashlib.sha384() fh = open(path, 'rb') while" ]
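
# Usage sketch (added for illustration; 'example.bin' is a hypothetical
# file path). Note that only md5() accepts an optional [start, stop) byte
# range; the other helpers always digest the whole file.
if __name__ == '__main__':
    path = 'example.bin'
    print('crc32  :', crc(path))
    print('md5    :', md5(path))
    print('md5 1MB:', md5(path, 0, CHUNK))  # digest of the first mebibyte only
    print('sha256 :', sha256(path))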