| ngram | listlengths (range: 0 to 82k) |
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import json
from kafkacli.formatter import Formatter

sampleJson = json.loads('{"a":"s", "b":1}')


def test_print_default(capsys):
    Formatter().print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == '{"a": "s", "b": 1}\n'


def test_print_idents(capsys):
    Formatter(indents=True).print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == '{\n "a": "s",\n "b": 1\n}\n'


def test_print_colors(capsys):
    Formatter(colors=True).print(sampleJson)
    captured = capsys.readouterr()
    assert captured.out == \
        '{"a": \x1b[34m"s"\x1b[39m, "b": \x1b[31m1\x1b[39m}\n'
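The three tests pin down the formatter's default, indented, and colored output for a flat JSON object. Below is a minimal sketch of a Formatter that would satisfy these assertions, assuming json.dumps-based serialization and ANSI escapes for the color mode; it is not the real kafkacli.formatter implementation, and the one-space indent is simply read off the collapsed expected string.

```python
import json

BLUE, RED, RESET = "\x1b[34m", "\x1b[31m", "\x1b[39m"


class Formatter:
    """Illustrative stand-in for kafkacli.formatter.Formatter (assumption, not the real code)."""

    def __init__(self, indents=False, colors=False):
        self.indents = indents
        self.colors = colors

    def _colorize(self, value):
        # String values in blue, numeric values in red, everything else plain.
        if isinstance(value, str):
            return BLUE + json.dumps(value) + RESET
        if isinstance(value, (int, float)) and not isinstance(value, bool):
            return RED + json.dumps(value) + RESET
        return json.dumps(value)

    def print(self, obj):
        if self.colors:
            # Only handles a flat dict, which is all the tests above exercise.
            body = ", ".join(
                json.dumps(k) + ": " + self._colorize(v) for k, v in obj.items()
            )
            print("{" + body + "}")
        elif self.indents:
            print(json.dumps(obj, indent=1))
        else:
            print(json.dumps(obj))
```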
from django import forms
from .models import Application


class ApplicationForm(forms.ModelForm):
    class Meta:
        model = Application
        fields = ('resume', 'cover_letter',)
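This ModelForm exposes only the resume and cover_letter fields of the Application model. A hypothetical view wiring it up follows; the URL name, template path, and the assumption that both fields are file uploads are illustrative and not taken from the original code.

```python
from django.shortcuts import redirect, render

from .forms import ApplicationForm


def apply(request):
    # request.FILES is needed if resume/cover_letter are file fields (assumption).
    form = ApplicationForm(request.POST or None, request.FILES or None)
    if request.method == "POST" and form.is_valid():
        form.save()
        return redirect("application-thanks")  # hypothetical URL name
    return render(request, "applications/apply.html", {"form": form})
```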
# ----------------------------------------------------------------------------
#  CLASSES: nightly
#
#  Test Case:  protocolo.py
#
#  Tests:      vistprotocol unit test
#
#  <NAME>, Tue Jan 11 10:19:23 PST 2011
# ----------------------------------------------------------------------------
tapp = visit_bin_path("visitprotocol")
res = sexe(tapp, ret_output=True)
if res["return_code"] == 0:
    excode = 111
else:
    excode = 113
Exit(excode)
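The snippet relies on helpers from the surrounding VisIt test harness (visit_bin_path, sexe, Exit). The stand-ins below use assumed semantics, only so the control flow can be read and run in isolation; the real harness defines these elsewhere and may behave differently.

```python
import os
import subprocess
import sys


def visit_bin_path(name):
    # Assumption: resolve a helper binary inside the VisIt bin directory.
    return os.path.join(os.environ.get("VISIT_BIN_DIR", "."), name)


def sexe(cmd, ret_output=False):
    # Assumption: run a shell command, optionally returning exit code and output.
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if ret_output:
        return {"return_code": proc.returncode, "output": proc.stdout + proc.stderr}
    return proc.returncode


def Exit(code=0):
    sys.exit(code)
```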
import libtcodpy as libtcod
from random import randint

nSquares = 30
nTiles = nSquares * 2 + 1
SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles

libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False,
                          libtcod.RENDERER_OPENGL)


# Directions: 1 = up (-y), 2 = right (+x), 3 = down (+y), 4 = left (-x).
def CheckDir(x, y, size, direction, table):
    # A move is legal only if it stays inside the grid and the cell two
    # tiles away in that direction has not been carved (is not white) yet.
    if direction == 1:
        if y - 2 <= 0:
            return 0
        if table[x][y - 2] == white:
            return 0
    elif direction == 2:
        if x + 2 >= size:
            return 0
        if table[x + 2][y] == white:
            return 0
    elif direction == 3:
        if y + 2 >= size:
            return 0
        if table[x][y + 2] == white:
            return 0
    elif direction == 4:
        if x - 2 <= 0:
            return 0
        if table[x - 2][y] == white:
            return 0
    return 1


def Possible(x, y, table, size):
    # Returns 1 if at least one cell two tiles away is still uncarved (black),
    # i.e. the walk can continue from (x, y).
    if x + 2 < size:
        if table[x + 2][y] == black:
            return 1
    if x - 2 > 0:
        if table[x - 2][y] == black:
            return 1
    if y + 2 < size:
        if table[x][y + 2] == black:
            return 1
    if y - 2 > 0:
        if table[x][y - 2] == black:
            return 1
    return 0


black = libtcod.black
white = libtcod.white

Table = [[0 for i in range(nTiles)] for i in range(nTiles)]
for x in range(nTiles):
    for y in range(nTiles):
        Table[x][y] = black
        libtcod.console_put_char_ex(None, x, y, 219, Table[x][y], libtcod.white)
libtcod.console_flush()

Memory = []
CurrX = 1
CurrY = 1
Table[CurrX][CurrY] = white
end = 0
while end == 0:
    # Carve forward while an uncarved neighbour exists (depth-first walk).
    while Possible(CurrX, CurrY, Table, nTiles):
        Dir = randint(1, 4)
        while CheckDir(CurrX, CurrY, nTiles, Dir, Table) == 0:
            Dir = randint(1, 4)
        if Dir == 1:
            Table[CurrX][CurrY - 1] = white
            CurrY -= 2
            Table[CurrX][CurrY] = white
        elif Dir == 2:
            Table[CurrX + 1][CurrY] = white
            CurrX += 2
            Table[CurrX][CurrY] = white
        elif Dir == 3:
            Table[CurrX][CurrY + 1] = white
            CurrY += 2
            Table[CurrX][CurrY] = white
        elif Dir == 4:
            Table[CurrX - 1][CurrY] = white
            CurrX -= 2
            Table[CurrX][CurrY] = white
        Memory.append(Dir)
        #print
        for x in range(nTiles):
            for y in range(nTiles):
                libtcod.console_put_char_ex(None, x, y, 219, Table[x][y], libtcod.white)
        libtcod.console_flush()
    # Dead end: unwind the recorded moves until movement is possible again,
    # stopping once the walk has backtracked all the way to the start cell.
    while Possible(CurrX, CurrY, Table, nTiles) == 0:
        MemorySize = len(Memory)
        Dir = Memory[MemorySize - 1]
        if Dir == 1:
            CurrY += 2
        elif Dir == 2:
            CurrX -= 2
        elif Dir == 3:
            CurrY -= 2
        elif Dir == 4:
            CurrX += 2
        del Memory[MemorySize - 1]
        if CurrX == 1 and CurrY == 1:
            end = 1
            break

#print
for x in range(nTiles):
    for y in range(nTiles):
        libtcod.console_put_char_ex(None, x, y, 219, Table[x][y], libtcod.white)
libtcod.console_flush()
libtcod.console_wait_for_keypress(True)
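The script renders only through libtcod. Below is a small helper, not part of the original, that dumps the finished Table as ASCII for quick inspection without a window, using the same convention that black cells are walls and white cells are carved passages.

```python
def dump_ascii(table, size):
    # '#' marks wall cells (still black), ' ' marks carved passages (white).
    for y in range(size):
        print("".join(" " if table[x][y] == white else "#" for x in range(size)))


# e.g. after the generation loop has finished:
# dump_ascii(Table, nTiles)
```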
# ######################################################################################################################
#  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.                                                  #
#                                                                                                                      #
#  Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance     #
#  with the License. You may obtain a copy of the License at                                                          #
#                                                                                                                      #
#      http://www.apache.org/licenses/LICENSE-2.0                                                                      #
#                                                                                                                      #
#  Unless required by applicable law or agreed to in writing, software distributed under the License is distributed   #
#  on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for  #
#  the specific language governing permissions and limitations under the License.                                     #
# ######################################################################################################################
import pytest

from shared.resource import (
    DatasetGroup,
    Schema,
    Dataset,
    DatasetImportJob,
    Solution,
    SolutionVersion,
    Campaign,
    EventTracker,
    BatchSegmentJob,
    BatchInferenceJob,
)


@pytest.mark.parametrize(
    "klass,camel,dash,snake",
    [
        (DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
        (Schema, "schema", "schema", "schema"),
        (Dataset, "dataset", "dataset", "dataset"),
        (
            DatasetImportJob,
            "datasetImportJob",
            "dataset-import-job",
            "dataset_import_job",
        ),
        (Solution, "solution", "solution", "solution"),
        (SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
        (Campaign, "campaign", "campaign", "campaign"),
        (EventTracker, "eventTracker", "event-tracker", "event_tracker"),
        (
            BatchInferenceJob,
            "batchInferenceJob",
            "batch-inference-job",
            "batch_inference_job",
        ),
        (BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
    ],
    ids=[
        "DatasetGroup",
        "Schema",
        "Dataset",
        "DatasetImportJob",
        "Solution",
        "SolutionVersion",
        "Campaign",
        "EventTracker",
        "BatchInferenceJob",
        "BatchSegmentJob",
    ],
)
def test_resource_naming(klass, camel, dash, snake):
    assert klass().name.camel == camel
    assert klass().name.dash == dash
    assert klass().name.snake == snake
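Every parametrized case follows one pattern: the class name rendered in camelCase, dash-case, and snake_case. Below is an illustrative sketch of a name property that would satisfy the assertions, assuming the class name itself is the source of truth; the real shared.resource classes may derive these values differently.

```python
import re


class ResourceName:
    def __init__(self, class_name):
        # Split e.g. "BatchInferenceJob" into ["Batch", "Inference", "Job"].
        self._words = re.findall(r"[A-Z][a-z0-9]*", class_name)

    @property
    def camel(self):
        return self._words[0].lower() + "".join(self._words[1:])

    @property
    def dash(self):
        return "-".join(w.lower() for w in self._words)

    @property
    def snake(self):
        return "_".join(w.lower() for w in self._words)


class Resource:
    @property
    def name(self):
        return ResourceName(type(self).__name__)
```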
[
"##################################################################### ## Sending a simple email message. ## def send(txt,",
"+ \" \" + task + \" grade\" p =",
"in range(100,1000))\\ and sys.argv[2] in ['Fall', 'Spring']\\ and int(sys.argv[3]) in",
"season = sys.argv[2] year = sys.argv[3] task = sys.argv[4] sender",
"+ \"@bu.edu\" msg[\"To\"] = \",\".join([target + \"@bu.edu\" for target in",
"files by email to enrolled students; the ## input grade",
"email on linux. import sys # For command line arguments.",
"import os # For commands and file manipulation (walk, path,",
"os # For commands and file manipulation (walk, path, system).",
"manipulation (walk, path, system). ##################################################################### ## Sending a simple email",
"+ task + \" grade\" p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE)",
"'UTF-8')) ##################################################################### ## Process the command line parameters. ## if",
"## if len(sys.argv) == 6\\ and (int(sys.argv[1][0:3]) in range(100,1000))\\ and",
"files: txt = open('./data/'+file, 'r').read() targets = file.split('.')[0].split(\"_\") send(txt, courseNumber,",
"file in files: txt = open('./data/'+file, 'r').read() targets = file.split('.')[0].split(\"_\")",
"file.split('.')[0].split(\"_\") send(txt, courseNumber, task, sender, targets) print('Sent grade file to",
"students; the ## input grade file names should correspond to",
"grade file names should correspond to the user names of",
"# For command line arguments. import os # For commands",
"= sender + \"@bu.edu\" msg[\"Subject\"] = \"CS \" + courseNumber",
"from email.mime.text import MIMEText # For creating a message string.",
"python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\\n') exit() ##################################################################### ##",
"6\\ and (int(sys.argv[1][0:3]) in range(100,1000))\\ and sys.argv[2] in ['Fall', 'Spring']\\",
"= sys.argv[1] # Accepts course names like \"591 X1.\" season",
"the user names of ## the students. ## ## from",
"line arguments. import os # For commands and file manipulation",
"like \"591 X1.\" season = sys.argv[2] year = sys.argv[3] task",
"## ## gradefiles-send.py ## ## Script to send grade files",
"sys.argv[4] sender = sys.argv[5] else: print('\\n Usage:\\n\\n % python gradefiles-send.py",
"for list of files. ## if not os.path.exists('./data'): print('No folder",
"targets) print('Sent grade file to ' + str(targets) + '.')",
"containing grade files found. Exiting.') exit() ##################################################################### ## Send the",
"names of ## the students. ## ## from email.mime.text import",
"exit() ##################################################################### ## Check for list of files. ## if",
"txt = open('./data/'+file, 'r').read() targets = file.split('.')[0].split(\"_\") send(txt, courseNumber, task,",
"##################################################################### ## Check for list of files. ## if not",
"a message string. from subprocess import Popen, PIPE # For",
"\"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ## Process the command line",
"targets = file.split('.')[0].split(\"_\") send(txt, courseNumber, task, sender, targets) print('Sent grade",
"import sys # For command line arguments. import os #",
"\" + courseNumber + \" \" + task + \"",
"\" + task + \" grade\" p = Popen([\"/usr/sbin/sendmail\", \"-t\"],",
"print('\\n Usage:\\n\\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\\n')",
"path, system). ##################################################################### ## Sending a simple email message. ##",
"files in os.walk('./data/'): for file in files: txt = open('./data/'+file,",
"(int(sys.argv[1][0:3]) in range(100,1000))\\ and sys.argv[2] in ['Fall', 'Spring']\\ and int(sys.argv[3])",
"simple email message. ## def send(txt, courseNumber, task, sender, targets):",
"+ courseNumber + \" \" + task + \" grade\"",
"## Process the command line parameters. ## if len(sys.argv) ==",
"to send grade files by email to enrolled students; the",
"parameters. ## if len(sys.argv) == 6\\ and (int(sys.argv[1][0:3]) in range(100,1000))\\",
"## Script to send grade files by email to enrolled",
"sys.argv[5] else: print('\\n Usage:\\n\\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY>",
"For sending email on linux. import sys # For command",
"= Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ## Process the",
"from subprocess import Popen, PIPE # For sending email on",
"'Spring']\\ and int(sys.argv[3]) in range(2000,2100): courseNumber = sys.argv[1] # Accepts",
"\"591 X1.\" season = sys.argv[2] year = sys.argv[3] task =",
"the students. ## ## from email.mime.text import MIMEText # For",
"in targets]) msg[\"Cc\"] = sender + \"@bu.edu\" msg[\"Subject\"] = \"CS",
"\"data\" containing grade files found. Exiting.') exit() ##################################################################### ## Send",
"files. ## for curdir, dirs, files in os.walk('./data/'): for file",
"sender = sys.argv[5] else: print('\\n Usage:\\n\\n % python gradefiles-send.py <###>",
"= \",\".join([target + \"@bu.edu\" for target in targets]) msg[\"Cc\"] =",
"MIMEText(txt) msg[\"From\"] = sender + \"@bu.edu\" msg[\"To\"] = \",\".join([target +",
"= sender + \"@bu.edu\" msg[\"To\"] = \",\".join([target + \"@bu.edu\" for",
"the command line parameters. ## if len(sys.argv) == 6\\ and",
"course names like \"591 X1.\" season = sys.argv[2] year =",
"sender, targets): msg = MIMEText(txt) msg[\"From\"] = sender + \"@bu.edu\"",
"email to enrolled students; the ## input grade file names",
"+ \"@bu.edu\" msg[\"Subject\"] = \"CS \" + courseNumber + \"",
"## from email.mime.text import MIMEText # For creating a message",
"files. ## if not os.path.exists('./data'): print('No folder \"data\" containing grade",
"Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ## Process the command",
"and file manipulation (walk, path, system). ##################################################################### ## Sending a",
"= sys.argv[4] sender = sys.argv[5] else: print('\\n Usage:\\n\\n % python",
"import Popen, PIPE # For sending email on linux. import",
"os.path.exists('./data'): print('No folder \"data\" containing grade files found. Exiting.') exit()",
"<YYYY> <task> <sender-username>\\n') exit() ##################################################################### ## Check for list of",
"task = sys.argv[4] sender = sys.argv[5] else: print('\\n Usage:\\n\\n %",
"['Fall', 'Spring']\\ and int(sys.argv[3]) in range(2000,2100): courseNumber = sys.argv[1] #",
"dirs, files in os.walk('./data/'): for file in files: txt =",
"<task> <sender-username>\\n') exit() ##################################################################### ## Check for list of files.",
"print('Sent grade file to ' + str(targets) + '.') #eof",
"grade files. ## for curdir, dirs, files in os.walk('./data/'): for",
"+ \"@bu.edu\" for target in targets]) msg[\"Cc\"] = sender +",
"if not os.path.exists('./data'): print('No folder \"data\" containing grade files found.",
"msg = MIMEText(txt) msg[\"From\"] = sender + \"@bu.edu\" msg[\"To\"] =",
"##################################################################### ## Send the grade files. ## for curdir, dirs,",
"\"@bu.edu\" msg[\"Subject\"] = \"CS \" + courseNumber + \" \"",
"targets]) msg[\"Cc\"] = sender + \"@bu.edu\" msg[\"Subject\"] = \"CS \"",
"of files. ## if not os.path.exists('./data'): print('No folder \"data\" containing",
"PIPE # For sending email on linux. import sys #",
"## ## from email.mime.text import MIMEText # For creating a",
"by email to enrolled students; the ## input grade file",
"targets): msg = MIMEText(txt) msg[\"From\"] = sender + \"@bu.edu\" msg[\"To\"]",
"send(txt, courseNumber, task, sender, targets): msg = MIMEText(txt) msg[\"From\"] =",
"string. from subprocess import Popen, PIPE # For sending email",
"grade files by email to enrolled students; the ## input",
"courseNumber = sys.argv[1] # Accepts course names like \"591 X1.\"",
"in range(2000,2100): courseNumber = sys.argv[1] # Accepts course names like",
"<sender-username>\\n') exit() ##################################################################### ## Check for list of files. ##",
"in files: txt = open('./data/'+file, 'r').read() targets = file.split('.')[0].split(\"_\") send(txt,",
"p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ## Process",
"For commands and file manipulation (walk, path, system). ##################################################################### ##",
"email message. ## def send(txt, courseNumber, task, sender, targets): msg",
"Accepts course names like \"591 X1.\" season = sys.argv[2] year",
"## the students. ## ## from email.mime.text import MIMEText #",
"correspond to the user names of ## the students. ##",
"courseNumber, task, sender, targets): msg = MIMEText(txt) msg[\"From\"] = sender",
"= sys.argv[5] else: print('\\n Usage:\\n\\n % python gradefiles-send.py <###> <Fall|Spring>",
"+ \" grade\" p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8'))",
"Usage:\\n\\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\\n') exit()",
"range(2000,2100): courseNumber = sys.argv[1] # Accepts course names like \"591",
"= sys.argv[2] year = sys.argv[3] task = sys.argv[4] sender =",
"## def send(txt, courseNumber, task, sender, targets): msg = MIMEText(txt)",
"## for curdir, dirs, files in os.walk('./data/'): for file in",
"Process the command line parameters. ## if len(sys.argv) == 6\\",
"input grade file names should correspond to the user names",
"list of files. ## if not os.path.exists('./data'): print('No folder \"data\"",
"Popen, PIPE # For sending email on linux. import sys",
"folder \"data\" containing grade files found. Exiting.') exit() ##################################################################### ##",
"in ['Fall', 'Spring']\\ and int(sys.argv[3]) in range(2000,2100): courseNumber = sys.argv[1]",
"= open('./data/'+file, 'r').read() targets = file.split('.')[0].split(\"_\") send(txt, courseNumber, task, sender,",
"the ## input grade file names should correspond to the",
"subprocess import Popen, PIPE # For sending email on linux.",
"task, sender, targets): msg = MIMEText(txt) msg[\"From\"] = sender +",
"range(100,1000))\\ and sys.argv[2] in ['Fall', 'Spring']\\ and int(sys.argv[3]) in range(2000,2100):",
"exit() ##################################################################### ## Send the grade files. ## for curdir,",
"\" \" + task + \" grade\" p = Popen([\"/usr/sbin/sendmail\",",
"curdir, dirs, files in os.walk('./data/'): for file in files: txt",
"Send the grade files. ## for curdir, dirs, files in",
"<Fall|Spring> <YYYY> <task> <sender-username>\\n') exit() ##################################################################### ## Check for list",
"# For sending email on linux. import sys # For",
"print('No folder \"data\" containing grade files found. Exiting.') exit() #####################################################################",
"sys # For command line arguments. import os # For",
"\"@bu.edu\" msg[\"To\"] = \",\".join([target + \"@bu.edu\" for target in targets])",
"names should correspond to the user names of ## the",
"sys.argv[3] task = sys.argv[4] sender = sys.argv[5] else: print('\\n Usage:\\n\\n",
"sender, targets) print('Sent grade file to ' + str(targets) +",
"send grade files by email to enrolled students; the ##",
"##################################################################### ## Process the command line parameters. ## if len(sys.argv)",
"For creating a message string. from subprocess import Popen, PIPE",
"message. ## def send(txt, courseNumber, task, sender, targets): msg =",
"not os.path.exists('./data'): print('No folder \"data\" containing grade files found. Exiting.')",
"len(sys.argv) == 6\\ and (int(sys.argv[1][0:3]) in range(100,1000))\\ and sys.argv[2] in",
"= MIMEText(txt) msg[\"From\"] = sender + \"@bu.edu\" msg[\"To\"] = \",\".join([target",
"% python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\\n') exit() #####################################################################",
"For command line arguments. import os # For commands and",
"os.walk('./data/'): for file in files: txt = open('./data/'+file, 'r').read() targets",
"names like \"591 X1.\" season = sys.argv[2] year = sys.argv[3]",
"message string. from subprocess import Popen, PIPE # For sending",
"Sending a simple email message. ## def send(txt, courseNumber, task,",
"(walk, path, system). ##################################################################### ## Sending a simple email message.",
"linux. import sys # For command line arguments. import os",
"sender + \"@bu.edu\" msg[\"Subject\"] = \"CS \" + courseNumber +",
"to the user names of ## the students. ## ##",
"to enrolled students; the ## input grade file names should",
"file names should correspond to the user names of ##",
"gradefiles-send.py ## ## Script to send grade files by email",
"file manipulation (walk, path, system). ##################################################################### ## Sending a simple",
"# For commands and file manipulation (walk, path, system). #####################################################################",
"commands and file manipulation (walk, path, system). ##################################################################### ## Sending",
"msg[\"To\"] = \",\".join([target + \"@bu.edu\" for target in targets]) msg[\"Cc\"]",
"sys.argv[2] in ['Fall', 'Spring']\\ and int(sys.argv[3]) in range(2000,2100): courseNumber =",
"system). ##################################################################### ## Sending a simple email message. ## def",
"\",\".join([target + \"@bu.edu\" for target in targets]) msg[\"Cc\"] = sender",
"for target in targets]) msg[\"Cc\"] = sender + \"@bu.edu\" msg[\"Subject\"]",
"in os.walk('./data/'): for file in files: txt = open('./data/'+file, 'r').read()",
"## Sending a simple email message. ## def send(txt, courseNumber,",
"stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ## Process the command line parameters.",
"\"@bu.edu\" for target in targets]) msg[\"Cc\"] = sender + \"@bu.edu\"",
"## ## Script to send grade files by email to",
"on linux. import sys # For command line arguments. import",
"command line arguments. import os # For commands and file",
"Script to send grade files by email to enrolled students;",
"# Accepts course names like \"591 X1.\" season = sys.argv[2]",
"sys.argv[2] year = sys.argv[3] task = sys.argv[4] sender = sys.argv[5]",
"user names of ## the students. ## ## from email.mime.text",
"courseNumber, task, sender, targets) print('Sent grade file to ' +",
"task, sender, targets) print('Sent grade file to ' + str(targets)",
"for curdir, dirs, files in os.walk('./data/'): for file in files:",
"= \"CS \" + courseNumber + \" \" + task",
"arguments. import os # For commands and file manipulation (walk,",
"year = sys.argv[3] task = sys.argv[4] sender = sys.argv[5] else:",
"grade files found. Exiting.') exit() ##################################################################### ## Send the grade",
"## Send the grade files. ## for curdir, dirs, files",
"for file in files: txt = open('./data/'+file, 'r').read() targets =",
"msg[\"Subject\"] = \"CS \" + courseNumber + \" \" +",
"## input grade file names should correspond to the user",
"sending email on linux. import sys # For command line",
"a simple email message. ## def send(txt, courseNumber, task, sender,",
"msg[\"From\"] = sender + \"@bu.edu\" msg[\"To\"] = \",\".join([target + \"@bu.edu\"",
"target in targets]) msg[\"Cc\"] = sender + \"@bu.edu\" msg[\"Subject\"] =",
"sender + \"@bu.edu\" msg[\"To\"] = \",\".join([target + \"@bu.edu\" for target",
"sys.argv[1] # Accepts course names like \"591 X1.\" season =",
"gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\\n') exit() ##################################################################### ## Check",
"should correspond to the user names of ## the students.",
"'r').read() targets = file.split('.')[0].split(\"_\") send(txt, courseNumber, task, sender, targets) print('Sent",
"and int(sys.argv[3]) in range(2000,2100): courseNumber = sys.argv[1] # Accepts course",
"of ## the students. ## ## from email.mime.text import MIMEText",
"p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ## Process the command line parameters. ##",
"command line parameters. ## if len(sys.argv) == 6\\ and (int(sys.argv[1][0:3])",
"if len(sys.argv) == 6\\ and (int(sys.argv[1][0:3]) in range(100,1000))\\ and sys.argv[2]",
"Check for list of files. ## if not os.path.exists('./data'): print('No",
"courseNumber + \" \" + task + \" grade\" p",
"def send(txt, courseNumber, task, sender, targets): msg = MIMEText(txt) msg[\"From\"]",
"\" grade\" p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) #####################################################################",
"Exiting.') exit() ##################################################################### ## Send the grade files. ## for",
"msg[\"Cc\"] = sender + \"@bu.edu\" msg[\"Subject\"] = \"CS \" +",
"<###> <Fall|Spring> <YYYY> <task> <sender-username>\\n') exit() ##################################################################### ## Check for",
"else: print('\\n Usage:\\n\\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task>",
"found. Exiting.') exit() ##################################################################### ## Send the grade files. ##",
"int(sys.argv[3]) in range(2000,2100): courseNumber = sys.argv[1] # Accepts course names",
"\"CS \" + courseNumber + \" \" + task +",
"## if not os.path.exists('./data'): print('No folder \"data\" containing grade files",
"open('./data/'+file, 'r').read() targets = file.split('.')[0].split(\"_\") send(txt, courseNumber, task, sender, targets)",
"enrolled students; the ## input grade file names should correspond",
"== 6\\ and (int(sys.argv[1][0:3]) in range(100,1000))\\ and sys.argv[2] in ['Fall',",
"X1.\" season = sys.argv[2] year = sys.argv[3] task = sys.argv[4]",
"import MIMEText # For creating a message string. from subprocess",
"= sys.argv[3] task = sys.argv[4] sender = sys.argv[5] else: print('\\n",
"= file.split('.')[0].split(\"_\") send(txt, courseNumber, task, sender, targets) print('Sent grade file",
"files found. Exiting.') exit() ##################################################################### ## Send the grade files.",
"## gradefiles-send.py ## ## Script to send grade files by",
"grade\" p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(), 'UTF-8')) ##################################################################### ##",
"and (int(sys.argv[1][0:3]) in range(100,1000))\\ and sys.argv[2] in ['Fall', 'Spring']\\ and",
"# For creating a message string. from subprocess import Popen,",
"task + \" grade\" p = Popen([\"/usr/sbin/sendmail\", \"-t\"], stdin=PIPE) p.communicate(bytes(msg.as_string(),",
"line parameters. ## if len(sys.argv) == 6\\ and (int(sys.argv[1][0:3]) in",
"##################################################################### ## ## gradefiles-send.py ## ## Script to send grade",
"email.mime.text import MIMEText # For creating a message string. from",
"## Check for list of files. ## if not os.path.exists('./data'):",
"send(txt, courseNumber, task, sender, targets) print('Sent grade file to '",
"students. ## ## from email.mime.text import MIMEText # For creating",
"creating a message string. from subprocess import Popen, PIPE #",
"the grade files. ## for curdir, dirs, files in os.walk('./data/'):",
"MIMEText # For creating a message string. from subprocess import",
"and sys.argv[2] in ['Fall', 'Spring']\\ and int(sys.argv[3]) in range(2000,2100): courseNumber"
] |
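As a small illustration of the script's file-name convention (hypothetical example names, not part of the script): a grade file under ./data/ named after the students' usernames, joined by underscores, is emailed to every listed username.

file = 'jsmith_adoe.txt'                    # assumed example grade file name
targets = file.split('.')[0].split('_')     # ['jsmith', 'adoe']
recipients = [t + '@bu.edu' for t in targets]
print(recipients)                           # ['jsmith@bu.edu', 'adoe@bu.edu']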
[
"# Save as protobuf with tf.Session() as sess: tf.initialize_all_variables().run() output_graph_def",
"tf.train.Saver() save_path = \"./target/\" + name + \"/\" model_name =",
") with tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\") as file:",
"all_variables))) wxf.export(npy, name + '.wxf', target_format='wxf') # Save as protobuf",
"name + \".pb\", \"wb\") as file: # 保存模型 file.write(output_graph_def.SerializeToString()) #",
"= os.path.join(save_path, model_name) saver.save(sess, save_path_full) ckpt = tf.train.get_checkpoint_state(save_path) reader =",
"= tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\"",
"= tf.InteractiveSession() G, D, Gs = pickle.load(file) saver = tf.train.Saver()",
"tf import wolframclient.serializers as wxf name = 'karras2018iclr-celebahq-1024x1024' file =",
"not os.path.exists(save_path): os.makedirs(save_path) save_path_full = os.path.join(save_path, model_name) saver.save(sess, save_path_full) ckpt",
"import pickle import tensorflow as tf import wolframclient.serializers as wxf",
"output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\")",
"'.wxf', target_format='wxf') # Save as protobuf with tf.Session() as sess:",
"save_path = \"./target/\" + name + \"/\" model_name = 'model'",
"name + \"/\" model_name = 'model' if not os.path.exists(save_path): os.makedirs(save_path)",
"saver = tf.train.Saver() save_path = \"./target/\" + name + \"/\"",
"tensorflow as tf import wolframclient.serializers as wxf name = 'karras2018iclr-celebahq-1024x1024'",
"sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\" + name",
"tf.Session() as sess: tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, #",
"ckpt = tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys()) npy",
"= 'model' if not os.path.exists(save_path): os.makedirs(save_path) save_path_full = os.path.join(save_path, model_name)",
"wolframclient.serializers as wxf name = 'karras2018iclr-celebahq-1024x1024' file = open(name +",
"tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\" +",
"import os import pickle import tensorflow as tf import wolframclient.serializers",
"os import pickle import tensorflow as tf import wolframclient.serializers as",
"# output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\" + name + \".pb\",",
"= 'karras2018iclr-celebahq-1024x1024' file = open(name + '.pkl', 'rb') sess =",
"+ name + \"/\" model_name = 'model' if not os.path.exists(save_path):",
"wxf name = 'karras2018iclr-celebahq-1024x1024' file = open(name + '.pkl', 'rb')",
"os.path.exists(save_path): os.makedirs(save_path) save_path_full = os.path.join(save_path, model_name) saver.save(sess, save_path_full) ckpt =",
"trained on CelebaHQ/2-exporter.py<gh_stars>1-10 import os import pickle import tensorflow as",
"with tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\") as file: #",
"import wolframclient.serializers as wxf name = 'karras2018iclr-celebahq-1024x1024' file = open(name",
"tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables,",
"'.pkl', 'rb') sess = tf.InteractiveSession() G, D, Gs = pickle.load(file)",
"with tf.Session() as sess: tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def,",
"map(reader.get_tensor, all_variables))) wxf.export(npy, name + '.wxf', target_format='wxf') # Save as",
"import tensorflow as tf import wolframclient.serializers as wxf name =",
"tf.InteractiveSession() G, D, Gs = pickle.load(file) saver = tf.train.Saver() save_path",
"input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\" + name +",
"save_path_full) ckpt = tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys())",
"as wxf name = 'karras2018iclr-celebahq-1024x1024' file = open(name + '.pkl',",
"all_variables = list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy, name",
"+ '.wxf', target_format='wxf') # Save as protobuf with tf.Session() as",
"tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] )",
"= dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy, name + '.wxf', target_format='wxf') #",
"'karras2018iclr-celebahq-1024x1024' file = open(name + '.pkl', 'rb') sess = tf.InteractiveSession()",
"save_path_full = os.path.join(save_path, model_name) saver.save(sess, save_path_full) ckpt = tf.train.get_checkpoint_state(save_path) reader",
"npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy, name + '.wxf', target_format='wxf')",
"tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\") as file: # 保存模型",
"\"./target/\" + name + \"/\" model_name = 'model' if not",
"pickle.load(file) saver = tf.train.Saver() save_path = \"./target/\" + name +",
"tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy,",
"output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with tf.gfile.GFile(\"./target/\" + name + \".pb\", \"wb\") as",
"G, D, Gs = pickle.load(file) saver = tf.train.Saver() save_path =",
"'rb') sess = tf.InteractiveSession() G, D, Gs = pickle.load(file) saver",
"list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy, name + '.wxf',",
"as protobuf with tf.Session() as sess: tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants(",
"wxf.export(npy, name + '.wxf', target_format='wxf') # Save as protobuf with",
"saver.save(sess, save_path_full) ckpt = tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables =",
"+ name + \".pb\", \"wb\") as file: # 保存模型 file.write(output_graph_def.SerializeToString())",
"= open(name + '.pkl', 'rb') sess = tf.InteractiveSession() G, D,",
"protobuf with tf.Session() as sess: tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess,",
"open(name + '.pkl', 'rb') sess = tf.InteractiveSession() G, D, Gs",
"'model' if not os.path.exists(save_path): os.makedirs(save_path) save_path_full = os.path.join(save_path, model_name) saver.save(sess,",
"target_format='wxf') # Save as protobuf with tf.Session() as sess: tf.initialize_all_variables().run()",
"= pickle.load(file) saver = tf.train.Saver() save_path = \"./target/\" + name",
"reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables, map(reader.get_tensor,",
"CelebaHQ/2-exporter.py<gh_stars>1-10 import os import pickle import tensorflow as tf import",
"name = 'karras2018iclr-celebahq-1024x1024' file = open(name + '.pkl', 'rb') sess",
"sess = tf.InteractiveSession() G, D, Gs = pickle.load(file) saver =",
"output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add'] ) with",
"as tf import wolframclient.serializers as wxf name = 'karras2018iclr-celebahq-1024x1024' file",
"+ \"/\" model_name = 'model' if not os.path.exists(save_path): os.makedirs(save_path) save_path_full",
"file = open(name + '.pkl', 'rb') sess = tf.InteractiveSession() G,",
"\"/\" model_name = 'model' if not os.path.exists(save_path): os.makedirs(save_path) save_path_full =",
"Save as protobuf with tf.Session() as sess: tf.initialize_all_variables().run() output_graph_def =",
"os.makedirs(save_path) save_path_full = os.path.join(save_path, model_name) saver.save(sess, save_path_full) ckpt = tf.train.get_checkpoint_state(save_path)",
"+ '.pkl', 'rb') sess = tf.InteractiveSession() G, D, Gs =",
"= \"./target/\" + name + \"/\" model_name = 'model' if",
"dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy, name + '.wxf', target_format='wxf') # Save",
"= list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables, map(reader.get_tensor, all_variables))) wxf.export(npy, name +",
"pickle import tensorflow as tf import wolframclient.serializers as wxf name",
"= tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys()) npy =",
"model_name) saver.save(sess, save_path_full) ckpt = tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables",
"Gs = pickle.load(file) saver = tf.train.Saver() save_path = \"./target/\" +",
"name + '.wxf', target_format='wxf') # Save as protobuf with tf.Session()",
"if not os.path.exists(save_path): os.makedirs(save_path) save_path_full = os.path.join(save_path, model_name) saver.save(sess, save_path_full)",
"= tf.train.NewCheckpointReader(ckpt.model_checkpoint_path) all_variables = list(reader.get_variable_to_shape_map().keys()) npy = dict(zip(all_variables, map(reader.get_tensor, all_variables)))",
"+ \".pb\", \"wb\") as file: # 保存模型 file.write(output_graph_def.SerializeToString()) # 序列化输出",
"<filename>Publisher/PGGAN-1024 trained on CelebaHQ/2-exporter.py<gh_stars>1-10 import os import pickle import tensorflow",
"on CelebaHQ/2-exporter.py<gh_stars>1-10 import os import pickle import tensorflow as tf",
"D, Gs = pickle.load(file) saver = tf.train.Saver() save_path = \"./target/\"",
"= tf.train.Saver() save_path = \"./target/\" + name + \"/\" model_name",
"as sess: tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out']",
"model_name = 'model' if not os.path.exists(save_path): os.makedirs(save_path) save_path_full = os.path.join(save_path,",
"sess: tf.initialize_all_variables().run() output_graph_def = tf.graph_util.convert_variables_to_constants( sess=sess, input_graph_def=sess.graph_def, # output_node_names=['G_paper_1/images_out'] output_node_names=['G_paper_1/ToRGB_lod0/add']",
"os.path.join(save_path, model_name) saver.save(sess, save_path_full) ckpt = tf.train.get_checkpoint_state(save_path) reader = tf.train.NewCheckpointReader(ckpt.model_checkpoint_path)"
] |
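A minimal sketch, assuming the same TensorFlow 1.x API used by the exporter above, of loading the frozen graph it writes to ./target/<name>.pb and looking up the exported output node:

import tensorflow as tf

name = 'karras2018iclr-celebahq-1024x1024'
with tf.gfile.GFile('./target/' + name + '.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    # The output node selected by the exporter:
    out = graph.get_tensor_by_name('G_paper_1/ToRGB_lod0/add:0')
    print(out)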
[
"time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin)",
"i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml()",
"changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i in range(0,pinSize): p =",
"changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2)",
"as conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None):",
"deg in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i]",
"changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2):",
"for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange =",
"changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180])",
"= yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if servo ==",
"for i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in",
"servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = []",
"move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin =",
"= Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0])",
"servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp =",
"random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])",
"GpioPin = servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo",
"GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 =",
"select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print (' i got",
"if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def",
"= readYaml() if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as",
"deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134, 1937, 1983,",
"def Start_Slow(a, b, c, d): for i in range(0,100,20): Run(a,b,c,d,i)",
"from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit",
"= servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i],",
"in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i in range(0,times):",
"if servo == None: print('close') exit() Initial = servo['Initial_Position']['I2C'] Current",
"multiprocessing import RPi.GPIO as GPIO from talk import say GPIO.setwarnings(False)",
"EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def",
"i got value of t is : ',t) if(t==0): random0()",
"changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0])",
"open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo",
"import multiprocessing import RPi.GPIO as GPIO from talk import say",
"r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print ('",
"for deg in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration)",
"time import random import multiprocessing import RPi.GPIO as GPIO from",
"len(pin) for i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg",
"time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05)",
"in range(0,15): r = random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]])",
"in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize",
"changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3) if(r==1):",
"random2() elif(t==3): random3() else: randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech):",
"for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for",
"c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'],",
"34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123,",
"GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1 = {'EN': 27,",
"changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170])",
"servo == None: print('close') exit() Initial = servo['Initial_Position']['I2C'] Current =",
"[0,0,0,0] ln = len(select) for i in range(0,t*3): r =",
"Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio",
"48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln = len(select)",
"select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894,",
"894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln =",
"i in range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]: update =",
"import os.path import yaml import time import random import multiprocessing",
"changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize = len(pin) for i in",
"elif(t==1): random1() elif(t==2): random2() elif(t==3): random3() else: randomCall(t) def speakOnline(t):",
"GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH)",
"from adafruit_servokit import ServoKit Motor1 = {'EN': 27, 'input1': 19,",
"deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c,",
"changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg =",
"random import multiprocessing import RPi.GPIO as GPIO from talk import",
"= GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand",
"expression(t) def speakOffline(speech): t = int(len(speech)/15) print ('Offline t value",
"changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120])",
"= select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def",
"20} for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1",
"ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8')",
"= {'EN': 27, 'input1': 19, 'input2': 16} Motor2 = {'EN':",
"range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -=",
"Motor2 = {'EN': 22, 'input1': 26, 'input2': 20} for x",
"range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i",
"= yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8')",
"= random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t):",
"if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1:",
"as GPIO from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit",
"print ('Offline t value is : ',t) p1 = multiprocessing.Process(target=expression,args=[t])",
"servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i in",
"1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok",
"Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c, d): for i in",
"update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i in range(0,pinSize):",
"range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print",
"i in range(0,15): r = random.randrange(1,1000000)%4 print (' move ',r)",
"range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize =",
"RPi.GPIO as GPIO from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from",
"pinSize = len(pin) for i in range(0,pinSize): p = pin[i]",
"changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50])",
"Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize",
"range(0,15): r = select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1",
"GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0)",
"got value of t is : ',t) if(t==0): random0() elif(t==1):",
"= deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b,",
"changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]]",
"i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3):",
"Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08)",
"def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i in range(0,pinSize): p",
"1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0]",
"randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech): t = int(len(speech)/15) print",
"for i in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition()",
"os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:",
"i in range(0,15): r = select[i%len(select)]%4 print (' move ',r)",
"19, 'input2': 16} Motor2 = {'EN': 22, 'input1': 26, 'input2':",
"27, 'input1': 19, 'input2': 16} Motor2 = {'EN': 22, 'input1':",
"== None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp = yaml.load(conf,",
"def move_head(times=3): for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05)",
"r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0])",
"None: print('close') exit() Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio",
"def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok",
"changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg",
"if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0])",
"speakOffline(speech): t = int(len(speech)/15) print ('Offline t value is :",
"= len(pin) for i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for",
"Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])",
"move_head(times=3): for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01)",
"x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if",
"for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif",
"ok = [0,0,0,0] ln = len(select) for i in range(0,t*3):",
"for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def",
"'input2': 16} Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}",
"',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg",
"changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180])",
"16} Motor2 = {'EN': 22, 'input1': 26, 'input2': 20} for",
"random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05)",
"a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'],",
"time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def",
"changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r",
"'..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo",
"= -update for deg in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2",
"randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select =",
"GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b,",
": ',t) if(t==0): random0() elif(t==1): random1() elif(t==2): random2() elif(t==3): random3()",
"= [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739,",
"[] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange",
"yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if servo == None:",
"'input2': 20} for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT)",
"[7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i in",
"elif(t==3): random3() else: randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech): t",
"-update for deg in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty)",
"random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05)",
"deg in range(0,maxChange,update): for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]]",
"EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH",
"134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]",
"len(pin) for i in range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]:",
"= len(select) for i in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]])",
"if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml() if servo",
"changeDegree([15],[90],5,0.05) def move_head(times=3): for i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0])",
"changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i in",
"ServoKit Motor1 = {'EN': 27, 'input1': 19, 'input2': 16} Motor2",
"if CurrentGpio[p]>degree[i]: update = -update for deg in range(CurrentGpio[p],degree[i],update): duty",
"changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else:",
"x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'],",
"def speakOnline(t): expression(t) def speakOffline(speech): t = int(len(speech)/15) print ('Offline",
"in range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]: update = -update",
"+= update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i in",
"if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1:",
"= GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH =",
"pin[i] if CurrentGpio[p]>degree[i]: update = -update for deg in range(CurrentGpio[p],degree[i],update):",
"GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c,",
"expression(t): print (' i got value of t is :",
"28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln = len(select) for",
"t value is : ',t) p1 = multiprocessing.Process(target=expression,args=[t]) p1.start() say(speech)",
"range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for i",
"= 0 pinSize = len(pin) for i in range(0,pinSize): maxChange",
"servo = yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w',",
"GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1:",
"changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i in range(0,times):",
"maxChange = 0 pinSize = len(pin) for i in range(0,pinSize):",
"100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16)",
"servo = readYaml() if servo == None: print('close') exit() Initial",
"of t is : ',t) if(t==0): random0() elif(t==1): random1() elif(t==2):",
"time.sleep(0.08) def no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05)",
"Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c, d, x):",
"def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize = len(pin) for i",
"#!/usr/bin/env python import os import os.path import yaml import time",
"d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a,",
"in range(0,maxChange,update): for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] +=",
"= random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05)",
"i in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0():",
"deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i in range(0,15):",
"def random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05)",
"servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None:",
"26, 'input2': 20} for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x],",
"Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2",
"s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml() if servo ==",
"== None: print('close') exit() Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C']",
"print('close') exit() Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio =",
"in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]]",
"ok[r]^=1 takePosition() def expression(t): print (' i got value of",
"exit() Initial = servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio']",
"[89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28,",
"adafruit_servokit import ServoKit Motor1 = {'EN': 27, 'input1': 19, 'input2':",
"python import os import os.path import yaml import time import",
"InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for",
"GPIO.OUT) Servo = [] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0)",
"= {'EN': 22, 'input1': 26, 'input2': 20} for x in",
"-= update for i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]]",
"random1() elif(t==2): random2() elif(t==3): random3() else: randomCall(t) def speakOnline(t): expression(t)",
"time.sleep(0.5) def yes(times=3): for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0])",
"for i in range(0,15): r = select[i%len(select)]%4 print (' move",
"= [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r = select[i%len(select)]%4 print",
"range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def",
"GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50))",
"= [0,0,0,0] for i in range(0,15): r = random.randrange(1,1000000)%4 print",
"os.path import yaml import time import random import multiprocessing import",
"deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for",
"def no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2)",
"readYaml() if servo == None: print('close') exit() Initial = servo['Initial_Position']['I2C']",
"elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r =",
"writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize =",
"[5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134, 1937,",
"changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def",
"takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]]",
"def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo = yaml.load(conf,",
"Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i in range(0,pinSize): hand.servo[pin[i]].angle =",
"takePosition() def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]]",
"Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5)",
"changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i in",
"= [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i in range(0,15): r",
"else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20])",
"select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r = select[i%len(select)]%4",
"yes(times=3): for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def",
"',t) if(t==0): random0() elif(t==1): random1() elif(t==2): random2() elif(t==3): random3() else:",
"b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW)",
"say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1 = {'EN':",
"def Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3):",
"maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for i in",
"ok = [0,0,0,0] for i in range(0,15): r = random.randrange(1,1000000)%4",
"[[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i in range(0,15): r =",
"b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'],",
"else: randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech): t = int(len(speech)/15)",
"conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if",
"Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin",
"in range(0,15): r = select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]])",
"random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok =",
"pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for",
"= [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48,",
"changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150])",
"[7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]",
"i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i in",
"talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1",
"random3() else: randomCall(t) def speakOnline(t): expression(t) def speakOffline(speech): t =",
"import yaml import time import random import multiprocessing import RPi.GPIO",
"Run(a, b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'],",
"GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH)",
"range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i)",
"import ServoKit Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}",
"t = int(len(speech)/15) print ('Offline t value is : ',t)",
"= int(len(speech)/15) print ('Offline t value is : ',t) p1",
"EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c, d):",
"time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c, d, x): GPIO.output(Motor1['input1'],",
"c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def",
"changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3",
"len(select) for i in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1",
"random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok =",
"speakOnline(t): expression(t) def speakOffline(speech): t = int(len(speech)/15) print ('Offline t",
"= [] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5):",
"i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in",
"1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok =",
"= [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select =",
"def expression(t): print (' i got value of t is",
"EN1.ChangeDutyCycle(x) def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c, d): for",
"Motor1 = {'EN': 27, 'input1': 19, 'input2': 16} Motor2 =",
"[[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107,",
"is : ',t) if(t==0): random0() elif(t==1): random1() elif(t==2): random2() elif(t==3):",
"int(len(speech)/15) print ('Offline t value is : ',t) p1 =",
"0 pinSize = len(pin) for i in range(0,pinSize): maxChange =",
"with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf)",
"= max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for i in range(0,pinSize):",
"p = pin[i] if CurrentGpio[p]>degree[i]: update = -update for deg",
"GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100)",
"yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml() if servo == None:",
"in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i in range(100,0,-20):",
"[0,0,0,0] for i in range(0,15): r = random.randrange(1,1000000)%4 print ('",
"ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg =",
"for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3):",
"Servo = [] for i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def",
"i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for",
"range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for i in range(0,6):",
"GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH)",
"for deg in range(0,maxChange,update): for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]:",
"= servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i",
"in range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r",
"= ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with",
"changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i in range(0,pinSize):",
"changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i in range(0,times): changeDegree([0],[20])",
"encoding='utf8') as conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return servo def",
"= pin[i] if CurrentGpio[p]>degree[i]: update = -update for deg in",
"range(0,times): changeDegree([0],[20]) changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r =",
"Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for",
"GPIO from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import",
"def speakOffline(speech): t = int(len(speech)/15) print ('Offline t value is",
"(' i got value of t is : ',t) if(t==0):",
"def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok",
"servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if servo",
"GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'],",
"open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return servo",
"changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0])",
"= os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as",
"move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8]",
"107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln",
"changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print (' i got value",
"changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20])",
"i in range(0,6): Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0",
"in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100)",
"changeDegreeGpio([0],[80],5,0.05) changeDegree([0],[0]) changeDegreeGpio([0],[100],5,0.05) changeDegreeGpio([0],[90],10,0.01) def random0(): r = random.randrange(1,10000000)%3 if(r==1):",
"EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0)",
"time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05)",
"yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as",
"servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio']",
"elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i in range(0,pinSize): hand.servo[pin[i]].angle",
"GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if",
"100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..',",
"t is : ',t) if(t==0): random0() elif(t==1): random1() elif(t==2): random2()",
"open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo",
"= servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio'] GpioPin =",
"{'EN': 22, 'input1': 26, 'input2': 20} for x in Motor1:",
"pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select",
"as conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml()",
"changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin",
"conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return servo def writeYaml(s=None): with",
"with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo = yaml.load(conf, Loader=yaml.FullLoader) return",
"Run(0,0,0,0,0) def Start_Slow(a, b, c, d): for i in range(0,100,20):",
"random0() elif(t==1): random1() elif(t==2): random2() elif(t==3): random3() else: randomCall(t) def",
"changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2():",
"ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+',",
"in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for i in",
"update = -update for deg in range(CurrentGpio[p],degree[i],update): duty = deg/18",
"for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def",
"'input1': 26, 'input2': 20} for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT)",
"for x in Motor1: GPIO.setup(Motor1[x], GPIO.OUT) GPIO.setup(Motor2[x], GPIO.OUT) EN1 =",
"select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3():",
"i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update):",
"if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update",
"update for i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] =",
"with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp)",
"range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]: update = -update for",
"= [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r",
"def Run(a, b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW)",
"b, c, d): for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def",
"writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None: yaml.dump(servo,conf) else:",
"range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def",
"if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp",
"yaml import time import random import multiprocessing import RPi.GPIO as",
"GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x) def Stop():",
"random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20])",
"for i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]",
"= [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] for i",
"Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml() if servo == None: print('close')",
"= servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo =",
"i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]:",
"as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo = readYaml()",
"duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a,",
"import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1 =",
"None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)",
"r = random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def",
"range(0,maxChange,update): for i in range(0,pinSize): if Current[pin[i]]<newDegree[i]: Current[pin[i]] += update",
"Current[pin[i]] -= update for i in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]]",
"for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for i",
"GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x)",
"time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for i in range(0,times):",
"servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio = servo['Current_Position']['Gpio']",
"= [0,0,0,0] ln = len(select) for i in range(0,t*3): r",
"changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120])",
"servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def",
"[[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in",
"takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for i",
"r = select[i%len(select)]%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition()",
"range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i in range(0,times): changeDegree([0],[30])",
"os import os.path import yaml import time import random import",
"i in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def",
"('Offline t value is : ',t) p1 = multiprocessing.Process(target=expression,args=[t]) p1.start()",
"GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1:",
"if(t==0): random0() elif(t==1): random1() elif(t==2): random2() elif(t==3): random3() else: randomCall(t)",
"print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin",
"def yes(times=3): for i in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08)",
"range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i in",
"import time import random import multiprocessing import RPi.GPIO as GPIO",
"changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg =",
"in range(0,pinSize): hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1)",
"(' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin",
"def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration): pinSize = len(pin) for",
"def Stop(): Run(0,0,0,0,0) def Start_Slow(a, b, c, d): for i",
"no(times=3): for i in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05)",
"GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__,",
"2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln = len(select) for i",
"if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if d==1: GPIO.output(Motor2['input2'], GPIO.HIGH) EN2.ChangeDutyCycle(x) EN1.ChangeDutyCycle(x)",
"Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize = len(pin) for",
"changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2,",
"in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for",
"encoding='utf8') as conf: servoBackUp = yaml.load(conf, Loader=yaml.FullLoader) writeYaml(servoBackUp) servo =",
"takePosition() def expression(t): print (' i got value of t",
"GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if c==1: GPIO.output(Motor2['input1'], GPIO.HIGH) if",
"for i in range(0,15): r = random.randrange(1,1000000)%4 print (' move",
"GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'],",
"d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW)",
"{'EN': 27, 'input1': 19, 'input2': 16} Motor2 = {'EN': 22,",
"range(0,15): r = random.randrange(1,1000000)%4 print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition()",
"Current[pin[i]] += update elif Current[pin[i]]>newDegree[i]: Current[pin[i]] -= update for i",
"def random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100])",
"if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def",
"def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select",
"readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo = yaml.load(conf, Loader=yaml.FullLoader)",
"CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW)",
"changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1():",
"GPIO.LOW) GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'],",
"Stop_Slow(a,b,c,d): for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for",
"elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180]) changeDegree([4],[170]) else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140])",
"pinSize = len(pin) for i in range(0,pinSize): maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)",
"(' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) takePosition() def randomCall(t): changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20]) pin =",
"readYaml() if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:",
"servo = readYaml() if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8')",
"else: yaml.dump(s,conf) servo = readYaml() if servo == None: with",
"[0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r =",
"34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973] ok = [0,0,0,0] ln = len(select) for i in",
"Start_Slow(a, b, c, d): for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5)",
"'input1': 19, 'input2': 16} Motor2 = {'EN': 22, 'input1': 26,",
"= select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t): print (' i",
"in range(0,times): changeDegree([15],[70],5,0.05) time.sleep(0.2) changeDegree([15],[110],5,0.05) time.sleep(0.2) changeDegree([15],[90],5,0.05) def move_head(times=3): for",
"elif(t==2): random2() elif(t==3): random3() else: randomCall(t) def speakOnline(t): expression(t) def",
"import RPi.GPIO as GPIO from talk import say GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM)",
"ln = len(select) for i in range(0,t*3): r = select[i%16]%4",
"pin = [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34,",
"changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg",
"hand.servo[pin[i]].angle = Current[pin[i]] servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]] writeYaml() time.sleep(time1) def takePosition():",
"changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin = [7,8,9,10]",
"CurrentGpio[p]>degree[i]: update = -update for deg in range(CurrentGpio[p],degree[i],update): duty =",
"changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140])",
"',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150]) pin = [7,8,9,10]",
"for i in range(0,pinSize): p = pin[i] if CurrentGpio[p]>degree[i]: update",
"CurrentGpio = servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i in range(0,6):",
"Loader=yaml.FullLoader) return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:",
"in range(CurrentGpio[p],degree[i],update): duty = deg/18 duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml()",
"hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..')) def readYaml():",
"= [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i",
"in range(0,t*3): r = select[i%16]%4 changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def expression(t):",
"'..')) def readYaml(): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf: servo =",
"encoding='utf8') as conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo =",
"r = random.randrange(1,10000000)%3 if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) elif(r==2): changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else:",
"value of t is : ',t) if(t==0): random0() elif(t==1): random1()",
"for i in range(100,0,-20): Run(a,b,c,d,i) time.sleep(0.5) def yes(times=3): for i",
"in range(0,times): changeDegree([0],[30]) time.sleep(0.08) changeDegree([0],[0]) time.sleep(0.08) def no(times=3): for i",
"return servo def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if",
"print (' i got value of t is : ',t)",
"22, 'input1': 26, 'input2': 20} for x in Motor1: GPIO.setup(Motor1[x],",
"duty+=2 Servo[p].ChangeDutyCycle(duty) time.sleep(duration) CurrentGpio[p]=degree[i] writeYaml() def Run(a, b, c, d,",
"def writeYaml(s=None): with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf: if s==None: yaml.dump(servo,conf)",
"changeDegreeGpio([0],[120],5,0.05) changeDegreeGpio([0],[90],5,0.05) else: changeDegreeGpio([0],[60],5,0.05) changeDegreeGpio([0],[90],5,0.05) def random1(): r = random.randrange(1,3)",
"import os import os.path import yaml import time import random",
"GPIO.OUT) EN1 = GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0)",
"else: changeDegree([3,4],[50,120]) changeDegree([9,10],[100,140]) changeDegree([9,10],[60,180]) changeDegree([3,4],[0,180]) def random2(): changeDegree([3,4],[20,150]) pin =",
"ok = [0,0,0,0] select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15):",
"i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT) Servo = [] for i",
"GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH) if b==1: GPIO.output(Motor1['input2'], GPIO.HIGH) if",
"writeYaml(servoBackUp) servo = readYaml() if servo == None: print('close') exit()",
"= random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2):",
"changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60]) changeDegree([3],[0]) elif(r==2): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([4],[120]) changeDegree([10],[140]) changeDegree([10],[180])",
"GPIO.PWM(Motor1['EN'], 100) EN2 = GPIO.PWM(Motor2['EN'], 100) EN1.start(0) EN2.start(0) hand =",
"import random import multiprocessing import RPi.GPIO as GPIO from talk",
"print (' move ',r) changeDegree([pin[r]],[deg[ok[r]][r]]) ok[r]^=1 takePosition() def random3(): changeDegree([3,4],[20,150])",
"yaml.dump(s,conf) servo = readYaml() if servo == None: with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+',",
"Servo.append(GPIO.PWM(GpioPin[i],50)) Servo[i].start(0) def changeDegree(pin,newDegree,time1=0.05,update=5): maxChange = 0 pinSize = len(pin)",
"= servo['Initial_Position']['I2C'] Current = servo['Current_Position']['I2C'] InitialGpio = servo['Initial_Position']['Gpio'] CurrentGpio =",
"random1(): r = random.randrange(1,3) if(r==1): changeDegree([0],[20]) changeDegree([0],[0]) changeDegree([3],[50]) changeDegree([9],[100]) changeDegree([9],[60])",
"writeYaml() def Run(a, b, c, d, x): GPIO.output(Motor1['input1'], GPIO.LOW) GPIO.output(Motor1['input2'],",
"= readYaml() if servo == None: print('close') exit() Initial =",
"= Current[pin[i]] writeYaml() time.sleep(time1) def takePosition(): changeDegree([7,8],[180,0]) changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0]) def changeDegreeGpio(pin,degree,update,duration):",
"[1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1] for i in range(0,15): r = select[i%len(select)]%4 print ('",
"max(abs(Current[pin[i]]-newDegree[i]),maxChange) for deg in range(0,maxChange,update): for i in range(0,pinSize): if",
"conf: if s==None: yaml.dump(servo,conf) else: yaml.dump(s,conf) servo = readYaml() if",
"changeDegree([3,4],[20,150]) pin = [7,8,9,10] deg = [[160,0,60,100],[180,20,100,140]] ok = [0,0,0,0]",
"servo['Current_Position']['Gpio'] GpioPin = servo['Pin']['Gpio'] for i in range(0,6): GPIO.setup(GpioPin[i], GPIO.OUT)",
"EN1.start(0) EN2.start(0) hand = ServoKit(channels=16) ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))",
"= len(pin) for i in range(0,pinSize): p = pin[i] if",
"c, d): for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d):",
"GPIO.output(Motor1['input2'], GPIO.LOW) GPIO.output(Motor2['input1'], GPIO.LOW) GPIO.output(Motor2['input2'], GPIO.LOW) if a==1: GPIO.output(Motor1['input1'], GPIO.HIGH)",
"= [5,6,7,8] deg = [[80,50,100,70],[110,90,110,90]] select = [89,93,472,347,2, 34, 134,",
"GPIO.setmode(GPIO.BCM) from adafruit_servokit import ServoKit Motor1 = {'EN': 27, 'input1':",
"d): for i in range(0,100,20): Run(a,b,c,d,i) time.sleep(0.5) def Stop_Slow(a,b,c,d): for"
] |
[
"session = object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self): return",
") mother_tongue_association_table = Table( 'mother_tongue_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'),",
"Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)",
"= object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self): return (",
"ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table = Table( 'mother_tongue_association', Base.metadata, Column( 'translator_id',",
"Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) class Language(Base): __tablename__ = 'languages'",
"nullable=False) ) mother_tongue_association_table = Table( 'mother_tongue_association', Base.metadata, Column( 'translator_id', UUID,",
"ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) written_association_table = Table(",
"onegov.core.orm.types import UUID spoken_association_table = Table( 'spoken_lang_association', Base.metadata, Column( 'translator_id',",
"sqlalchemy.orm import object_session from onegov.core.orm import Base from onegov.core.orm.types import",
"mother_tongue_association_table = Table( 'mother_tongue_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False),",
"return session.query( written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self): \"\"\"Having it as mother",
") written_association_table = Table( 'written_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'),",
"session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self): return ( self.speakers_count + self.writers_count",
"Column, Text, Table, ForeignKey from sqlalchemy.orm import object_session from onegov.core.orm",
"@property def deletable(self): return ( self.speakers_count + self.writers_count + self.native_speakers_count",
") id = Column(UUID, primary_key=True, default=uuid4) name = Column(Text, nullable=False)",
"spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self): session = object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count()",
"__table_args__ = ( Index('unique_name', 'name', unique=True), ) id = Column(UUID,",
"from sqlalchemy import Index, Column, Text, Table, ForeignKey from sqlalchemy.orm",
"session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self): session = object_session(self) return session.query(",
"( Index('unique_name', 'name', unique=True), ) id = Column(UUID, primary_key=True, default=uuid4)",
"default=uuid4) name = Column(Text, nullable=False) @property def speakers_count(self): session =",
"tongue...\"\"\" session = object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self):",
"sqlalchemy import Index, Column, Text, Table, ForeignKey from sqlalchemy.orm import",
"@property def native_speakers_count(self): \"\"\"Having it as mother tongue...\"\"\" session =",
"UUID, ForeignKey('languages.id'), nullable=False) ) class Language(Base): __tablename__ = 'languages' __table_args__",
"return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self): session = object_session(self) return",
"class Language(Base): __tablename__ = 'languages' __table_args__ = ( Index('unique_name', 'name',",
"from onegov.core.orm.types import UUID spoken_association_table = Table( 'spoken_lang_association', Base.metadata, Column(",
"@property def writers_count(self): session = object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count() @property",
"nullable=False) ) class Language(Base): __tablename__ = 'languages' __table_args__ = (",
"UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) class Language(Base):",
"def speakers_count(self): session = object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def",
"mother tongue...\"\"\" session = object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def",
"name = Column(Text, nullable=False) @property def speakers_count(self): session = object_session(self)",
"UUID, ForeignKey('languages.id'), nullable=False) ) written_association_table = Table( 'written_lang_association', Base.metadata, Column(",
"from sqlalchemy.orm import object_session from onegov.core.orm import Base from onegov.core.orm.types",
"Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table = Table( 'mother_tongue_association', Base.metadata,",
"UUID spoken_association_table = Table( 'spoken_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'),",
"onegov.core.orm import Base from onegov.core.orm.types import UUID spoken_association_table = Table(",
"Index, Column, Text, Table, ForeignKey from sqlalchemy.orm import object_session from",
"'mother_tongue_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'),",
"= Column(UUID, primary_key=True, default=uuid4) name = Column(Text, nullable=False) @property def",
"Base from onegov.core.orm.types import UUID spoken_association_table = Table( 'spoken_lang_association', Base.metadata,",
"Column(Text, nullable=False) @property def speakers_count(self): session = object_session(self) return session.query(",
"def native_speakers_count(self): \"\"\"Having it as mother tongue...\"\"\" session = object_session(self)",
"object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self): return ( self.speakers_count",
"'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) class",
"deletable(self): return ( self.speakers_count + self.writers_count + self.native_speakers_count ) ==",
"nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) class Language(Base): __tablename__ =",
"import uuid4 from sqlalchemy import Index, Column, Text, Table, ForeignKey",
"session = object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self): \"\"\"Having",
"__tablename__ = 'languages' __table_args__ = ( Index('unique_name', 'name', unique=True), )",
"ForeignKey('languages.id'), nullable=False) ) class Language(Base): __tablename__ = 'languages' __table_args__ =",
"session = object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self): session",
"return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self): return ( self.speakers_count +",
"object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self): \"\"\"Having it as",
"= object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self): session =",
"'spoken_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'),",
"return ( self.speakers_count + self.writers_count + self.native_speakers_count ) == 0",
"ForeignKey('languages.id'), nullable=False) ) written_association_table = Table( 'written_lang_association', Base.metadata, Column( 'translator_id',",
"Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) written_association_table = Table( 'written_lang_association', Base.metadata,",
"primary_key=True, default=uuid4) name = Column(Text, nullable=False) @property def speakers_count(self): session",
"= Column(Text, nullable=False) @property def speakers_count(self): session = object_session(self) return",
"'written_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'),",
"ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) class Language(Base): __tablename__",
"nullable=False) ) written_association_table = Table( 'written_lang_association', Base.metadata, Column( 'translator_id', UUID,",
"'languages' __table_args__ = ( Index('unique_name', 'name', unique=True), ) id =",
"written_association_table = Table( 'written_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False),",
"def deletable(self): return ( self.speakers_count + self.writers_count + self.native_speakers_count )",
"Table( 'spoken_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID,",
"speakers_count(self): session = object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self):",
"'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) written_association_table",
"\"\"\"Having it as mother tongue...\"\"\" session = object_session(self) return session.query(",
"it as mother tongue...\"\"\" session = object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count()",
"Table, ForeignKey from sqlalchemy.orm import object_session from onegov.core.orm import Base",
"UUID, ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table = Table( 'mother_tongue_association', Base.metadata, Column(",
"uuid4 from sqlalchemy import Index, Column, Text, Table, ForeignKey from",
"spoken_association_table = Table( 'spoken_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False),",
"Column(UUID, primary_key=True, default=uuid4) name = Column(Text, nullable=False) @property def speakers_count(self):",
"= object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self): \"\"\"Having it",
"= Table( 'spoken_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id',",
"import Base from onegov.core.orm.types import UUID spoken_association_table = Table( 'spoken_lang_association',",
"Table( 'written_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID,",
"'name', unique=True), ) id = Column(UUID, primary_key=True, default=uuid4) name =",
"import UUID spoken_association_table = Table( 'spoken_lang_association', Base.metadata, Column( 'translator_id', UUID,",
"unique=True), ) id = Column(UUID, primary_key=True, default=uuid4) name = Column(Text,",
"UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table =",
"nullable=False) @property def speakers_count(self): session = object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count()",
"from uuid import uuid4 from sqlalchemy import Index, Column, Text,",
"session.query( written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self): \"\"\"Having it as mother tongue...\"\"\"",
"Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) )",
"Language(Base): __tablename__ = 'languages' __table_args__ = ( Index('unique_name', 'name', unique=True),",
"object_session from onegov.core.orm import Base from onegov.core.orm.types import UUID spoken_association_table",
"uuid import uuid4 from sqlalchemy import Index, Column, Text, Table,",
"import object_session from onegov.core.orm import Base from onegov.core.orm.types import UUID",
"ForeignKey from sqlalchemy.orm import object_session from onegov.core.orm import Base from",
"import Index, Column, Text, Table, ForeignKey from sqlalchemy.orm import object_session",
"ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table = Table(",
"native_speakers_count(self): \"\"\"Having it as mother tongue...\"\"\" session = object_session(self) return",
"Index('unique_name', 'name', unique=True), ) id = Column(UUID, primary_key=True, default=uuid4) name",
"@property def speakers_count(self): session = object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property",
"= ( Index('unique_name', 'name', unique=True), ) id = Column(UUID, primary_key=True,",
"Table( 'mother_tongue_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID,",
"nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) written_association_table = Table( 'written_lang_association',",
"mother_tongue_association_table).filter_by(lang_id=self.id).count() @property def deletable(self): return ( self.speakers_count + self.writers_count +",
"UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) written_association_table =",
"id = Column(UUID, primary_key=True, default=uuid4) name = Column(Text, nullable=False) @property",
"def writers_count(self): session = object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count() @property def",
"object_session(self) return session.query( spoken_association_table).filter_by(lang_id=self.id).count() @property def writers_count(self): session = object_session(self)",
"nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table = Table( 'mother_tongue_association',",
") class Language(Base): __tablename__ = 'languages' __table_args__ = ( Index('unique_name',",
"= 'languages' __table_args__ = ( Index('unique_name', 'name', unique=True), ) id",
"= Table( 'written_lang_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id',",
"written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self): \"\"\"Having it as mother tongue...\"\"\" session",
"Text, Table, ForeignKey from sqlalchemy.orm import object_session from onegov.core.orm import",
"'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False) ) mother_tongue_association_table",
"= Table( 'mother_tongue_association', Base.metadata, Column( 'translator_id', UUID, ForeignKey('translators.id'), nullable=False), Column('lang_id',",
"writers_count(self): session = object_session(self) return session.query( written_association_table).filter_by(lang_id=self.id).count() @property def native_speakers_count(self):",
"from onegov.core.orm import Base from onegov.core.orm.types import UUID spoken_association_table =",
"as mother tongue...\"\"\" session = object_session(self) return session.query( mother_tongue_association_table).filter_by(lang_id=self.id).count() @property"
] |
[
"graph def by removing the skipped nodes and clean up",
"map.\" % name) return node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts the values",
"2.0 (the \"License\"); # you may not use this file",
"enumerate(new_node.input): if input_node == value.name: new_node.input[i] = value.input[0] result_graph_def.node.extend([new_node]) result_graph_def.library.CopyFrom(input_graph_def.library)",
"be skipped. inputs_to_remove: List of nodes to be removed from",
"removed. Args: input_graph_def: GraphDef object to be cleaned. node_to_skip: Dict",
"result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if node.name in",
"scale_after_normalization(node): if node.op == \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return True def",
"entry indexed by name for every node. name: Identifies the",
"\"\"\" result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if node.name",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"in nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in",
"indexed by name for every node. name: Identifies the node",
"Const NodeDef that has the values we want to access.",
"node_from_map(node_map, name): \"\"\"Pulls a node def from a dictionary for",
"as a numpy ndarray. Args: node_def: Const NodeDef that has",
"present in the dictionary. \"\"\" stripped_name = node_name_from_input(name) if stripped_name",
"= node_name[1:] m = re.search(r\"(.*):\\d+$\", node_name) if m: node_name =",
"use this file except in compliance with the License. #",
"# Whether to scale by gamma after normalization. def scale_after_normalization(node):",
"def node_from_map(node_map, name): \"\"\"Pulls a node def from a dictionary",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"values from a const NodeDef as a numpy ndarray. Args:",
"License. # You may obtain a copy of the License",
"the values. Raises: ValueError: If the node isn't a Const.",
"nodes to be removed from inputs of all nodes. Returns:",
"inputs of all nodes. Returns: GraphDef that has been cleaned.",
"List of nodes to be removed from inputs of all",
"to access. Returns: Numpy ndarray containing the values. Raises: ValueError:",
"under the License is distributed on an \"AS IS\" BASIS,",
"# Custom op name for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D =",
"tensor_value # Whether to scale by gamma after normalization. def",
"License for the specific language governing permissions and # limitations",
"up the nodes with inputs that have been removed. Args:",
"by gamma after normalization. def scale_after_normalization(node): if node.op == \"BatchNormWithGlobalNormalization\":",
"FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The grappler op name for fused",
"Returns: Numpy ndarray containing the values. Raises: ValueError: If the",
"tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to scale by",
"input_tensor = node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether",
"if node_name.startswith(\"^\"): node_name = node_name[1:] m = re.search(r\"(.*):\\d+$\", node_name) if",
"'%s' found in map.\" % name) return node_map[stripped_name] def values_from_const(node_def):",
"NodeDef as a numpy ndarray. Args: node_def: Const NodeDef that",
"governing permissions and # limitations under the License. # ==============================================================================",
"graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import tensor_util #",
"limitations under the License. # ============================================================================== import re from tensorflow.core.framework",
"== value.name: new_node.input[i] = value.input[0] result_graph_def.node.extend([new_node]) result_graph_def.library.CopyFrom(input_graph_def.library) result_graph_def.versions.CopyFrom(input_graph_def.versions) return result_graph_def",
"with '_' FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map, name): \"\"\"Pulls a",
"if input_node == value.name: new_node.input[i] = value.input[0] result_graph_def.node.extend([new_node]) result_graph_def.library.CopyFrom(input_graph_def.library) result_graph_def.versions.CopyFrom(input_graph_def.versions)",
"node isn't present in the dictionary. \"\"\" stripped_name = node_name_from_input(name)",
"node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts the values from a const NodeDef",
"and other decorations to get the underlying node name.\"\"\" if",
"name) return node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts the values from a",
"node_name.startswith(\"^\"): node_name = node_name[1:] m = re.search(r\"(.*):\\d+$\", node_name) if m:",
"inputs_to_remove: List of nodes to be removed from inputs of",
"def node_name_from_input(node_name): \"\"\"Strips off ports and other decorations to get",
"GraphDef object to be cleaned. node_to_skip: Dict with node names",
"tensorflow.python.framework import tensor_util # Custom op name for fused depthwise",
"in compliance with the License. # You may obtain a",
"off ports and other decorations to get the underlying node",
"the skipped nodes and clean up the nodes with inputs",
"values. Raises: ValueError: If the node isn't a Const. \"\"\"",
"software # distributed under the License is distributed on an",
"node_name = node_name[1:] m = re.search(r\"(.*):\\d+$\", node_name) if m: node_name",
"cleaned. \"\"\" result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node: if",
"be a Const op for values_from_const.\" % node_def.name) input_tensor =",
"\"\"\"Strips off ports and other decorations to get the underlying",
"with inputs that have been removed. Args: input_graph_def: GraphDef object",
"the underlying node name.\"\"\" if node_name.startswith(\"^\"): node_name = node_name[1:] m",
"ports and other decorations to get the underlying node name.\"\"\"",
"re.search(r\"(.*):\\d+$\", node_name) if m: node_name = m.group(1) return node_name def",
"if node.name in nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for",
"nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in inputs_to_remove:",
"input_graph_def.node: if node.name in nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node)",
"been removed. Args: input_graph_def: GraphDef object to be cleaned. node_to_skip:",
"node_name) if m: node_name = m.group(1) return node_name def cleanup_graph_def(input_graph_def,",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"# ============================================================================== import re from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"to in writing, software # distributed under the License is",
"# See the License for the specific language governing permissions",
"node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean up the graph def",
"name for fused MatMul which starts with '_' FUSED_MATMUL =",
"cleaned. node_to_skip: Dict with node names to be skipped. inputs_to_remove:",
"language governing permissions and # limitations under the License. #",
"or agreed to in writing, software # distributed under the",
"required by applicable law or agreed to in writing, software",
"tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import tensor_util # Custom op",
"in the dictionary. \"\"\" stripped_name = node_name_from_input(name) if stripped_name not",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"with the License. # You may obtain a copy of",
"all nodes. Returns: GraphDef that has been cleaned. \"\"\" result_graph_def",
"== \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return True def node_name_from_input(node_name): \"\"\"Strips off",
"LLC # # Licensed under the Apache License, Version 2.0",
"node name.\"\"\" if node_name.startswith(\"^\"): node_name = node_name[1:] m = re.search(r\"(.*):\\d+$\",",
"compliance with the License. # You may obtain a copy",
"conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The grappler op name for",
"agreed to in writing, software # distributed under the License",
"License. # ============================================================================== import re from tensorflow.core.framework import graph_pb2 from",
"node with the given name. Raises: ValueError: If the node",
"distributed under the License is distributed on an \"AS IS\"",
"we want to find. Returns: NodeDef of the node with",
"'_' FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map, name): \"\"\"Pulls a node",
"node_def_pb2 from tensorflow.python.framework import tensor_util # Custom op name for",
"values we want to access. Returns: Numpy ndarray containing the",
"been cleaned. \"\"\" result_graph_def = graph_pb2.GraphDef() for node in input_graph_def.node:",
"express or implied. # See the License for the specific",
"except in compliance with the License. # You may obtain",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"values_from_const(node_def): \"\"\"Extracts the values from a const NodeDef as a",
"not use this file except in compliance with the License.",
"import tensor_util # Custom op name for fused depthwise conv2d",
"Dictionary containing an entry indexed by name for every node.",
"containing the values. Raises: ValueError: If the node isn't a",
"if node_def.op != \"Const\": raise ValueError( \"Node named '%s' should",
"writing, software # distributed under the License is distributed on",
"node_name = m.group(1) return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean",
"you may not use this file except in compliance with",
"depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The grappler op name",
"a Const. \"\"\" if node_def.op != \"Const\": raise ValueError( \"Node",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"the node isn't present in the dictionary. \"\"\" stripped_name =",
"node isn't a Const. \"\"\" if node_def.op != \"Const\": raise",
"% name) return node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts the values from",
"# The grappler op name for fused MatMul which starts",
"return node.attr[\"scale_after_normalization\"].b return True def node_name_from_input(node_name): \"\"\"Strips off ports and",
"= re.search(r\"(.*):\\d+$\", node_name) if m: node_name = m.group(1) return node_name",
"if node.op == \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return True def node_name_from_input(node_name):",
"If the node isn't present in the dictionary. \"\"\" stripped_name",
"normalization. def scale_after_normalization(node): if node.op == \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return",
"name): \"\"\"Pulls a node def from a dictionary for a",
"removing the skipped nodes and clean up the nodes with",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"input_graph_def: GraphDef object to be cleaned. node_to_skip: Dict with node",
"return True def node_name_from_input(node_name): \"\"\"Strips off ports and other decorations",
"return tensor_value # Whether to scale by gamma after normalization.",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"the graph def by removing the skipped nodes and clean",
"node.attr[\"scale_after_normalization\"].b return True def node_name_from_input(node_name): \"\"\"Strips off ports and other",
"raise ValueError(\"No node named '%s' found in map.\" % name)",
"from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import tensor_util # Custom",
"from inputs of all nodes. Returns: GraphDef that has been",
"a dictionary for a given name. Args: node_map: Dictionary containing",
"to get the underlying node name.\"\"\" if node_name.startswith(\"^\"): node_name =",
"input_node == value.name: new_node.input[i] = value.input[0] result_graph_def.node.extend([new_node]) result_graph_def.library.CopyFrom(input_graph_def.library) result_graph_def.versions.CopyFrom(input_graph_def.versions) return",
"skipped. inputs_to_remove: List of nodes to be removed from inputs",
"names to be skipped. inputs_to_remove: List of nodes to be",
"2019 Google LLC # # Licensed under the Apache License,",
"Dict with node names to be skipped. inputs_to_remove: List of",
"removed from inputs of all nodes. Returns: GraphDef that has",
"import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import tensor_util",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"the License is distributed on an \"AS IS\" BASIS, #",
"node_def: Const NodeDef that has the values we want to",
"every node. name: Identifies the node we want to find.",
"in node_map: raise ValueError(\"No node named '%s' found in map.\"",
"new_node.CopyFrom(node) for value in inputs_to_remove: for i, input_node in enumerate(new_node.input):",
"node def from a dictionary for a given name. Args:",
"const NodeDef as a numpy ndarray. Args: node_def: Const NodeDef",
"node we want to find. Returns: NodeDef of the node",
"name.\"\"\" if node_name.startswith(\"^\"): node_name = node_name[1:] m = re.search(r\"(.*):\\d+$\", node_name)",
"fused MatMul which starts with '_' FUSED_MATMUL = '_FusedMatMul' def",
"that have been removed. Args: input_graph_def: GraphDef object to be",
"Raises: ValueError: If the node isn't present in the dictionary.",
"Whether to scale by gamma after normalization. def scale_after_normalization(node): if",
"by name for every node. name: Identifies the node we",
"law or agreed to in writing, software # distributed under",
"the values we want to access. Returns: Numpy ndarray containing",
"Identifies the node we want to find. Returns: NodeDef of",
"the node with the given name. Raises: ValueError: If the",
"= '_FusedMatMul' def node_from_map(node_map, name): \"\"\"Pulls a node def from",
"node_name_from_input(node_name): \"\"\"Strips off ports and other decorations to get the",
"want to find. Returns: NodeDef of the node with the",
"if m: node_name = m.group(1) return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip,",
"value in inputs_to_remove: for i, input_node in enumerate(new_node.input): if input_node",
"graph_pb2.GraphDef() for node in input_graph_def.node: if node.name in nodes_to_skip: continue",
"i, input_node in enumerate(new_node.input): if input_node == value.name: new_node.input[i] =",
"inputs_to_remove: for i, input_node in enumerate(new_node.input): if input_node == value.name:",
"of the node with the given name. Raises: ValueError: If",
"for a given name. Args: node_map: Dictionary containing an entry",
"nodes. Returns: GraphDef that has been cleaned. \"\"\" result_graph_def =",
"starts with '_' FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map, name): \"\"\"Pulls",
"node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in inputs_to_remove: for i, input_node in",
"op for values_from_const.\" % node_def.name) input_tensor = node_def.attr[\"value\"].tensor tensor_value =",
"may obtain a copy of the License at # #",
"if stripped_name not in node_map: raise ValueError(\"No node named '%s'",
"a numpy ndarray. Args: node_def: Const NodeDef that has the",
"for values_from_const.\" % node_def.name) input_tensor = node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor)",
"\"\"\"Extracts the values from a const NodeDef as a numpy",
"'%s' should be a Const op for values_from_const.\" % node_def.name)",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"def by removing the skipped nodes and clean up the",
"containing an entry indexed by name for every node. name:",
"name. Raises: ValueError: If the node isn't present in the",
"may not use this file except in compliance with the",
"Custom op name for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative'",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"this file except in compliance with the License. # You",
"def from a dictionary for a given name. Args: node_map:",
"'_FusedMatMul' def node_from_map(node_map, name): \"\"\"Pulls a node def from a",
"m.group(1) return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean up the",
"isn't a Const. \"\"\" if node_def.op != \"Const\": raise ValueError(",
"not in node_map: raise ValueError(\"No node named '%s' found in",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"named '%s' found in map.\" % name) return node_map[stripped_name] def",
"# # Licensed under the Apache License, Version 2.0 (the",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"dictionary. \"\"\" stripped_name = node_name_from_input(name) if stripped_name not in node_map:",
"node names to be skipped. inputs_to_remove: List of nodes to",
"for node in input_graph_def.node: if node.name in nodes_to_skip: continue new_node",
"underlying node name.\"\"\" if node_name.startswith(\"^\"): node_name = node_name[1:] m =",
"a Const op for values_from_const.\" % node_def.name) input_tensor = node_def.attr[\"value\"].tensor",
"node.op == \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return True def node_name_from_input(node_name): \"\"\"Strips",
"node_to_skip: Dict with node names to be skipped. inputs_to_remove: List",
"= node_name_from_input(name) if stripped_name not in node_map: raise ValueError(\"No node",
"from a const NodeDef as a numpy ndarray. Args: node_def:",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to scale by gamma after",
"\"Const\": raise ValueError( \"Node named '%s' should be a Const",
"node in input_graph_def.node: if node.name in nodes_to_skip: continue new_node =",
"scale by gamma after normalization. def scale_after_normalization(node): if node.op ==",
"name. Args: node_map: Dictionary containing an entry indexed by name",
"MatMul which starts with '_' FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map,",
"node named '%s' found in map.\" % name) return node_map[stripped_name]",
"\"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return True def node_name_from_input(node_name): \"\"\"Strips off ports",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"True def node_name_from_input(node_name): \"\"\"Strips off ports and other decorations to",
"or implied. # See the License for the specific language",
"stripped_name = node_name_from_input(name) if stripped_name not in node_map: raise ValueError(\"No",
"def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean up the graph def by",
"Const. \"\"\" if node_def.op != \"Const\": raise ValueError( \"Node named",
"\"Node named '%s' should be a Const op for values_from_const.\"",
"have been removed. Args: input_graph_def: GraphDef object to be cleaned.",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"'FusedDepthwiseConv2dNative' # The grappler op name for fused MatMul which",
"node_name[1:] m = re.search(r\"(.*):\\d+$\", node_name) if m: node_name = m.group(1)",
"the node isn't a Const. \"\"\" if node_def.op != \"Const\":",
"fused depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The grappler op",
"with node names to be skipped. inputs_to_remove: List of nodes",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"should be a Const op for values_from_const.\" % node_def.name) input_tensor",
"nodes_to_skip, inputs_to_remove): \"\"\"Clean up the graph def by removing the",
"return node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts the values from a const",
"permissions and # limitations under the License. # ============================================================================== import",
"in inputs_to_remove: for i, input_node in enumerate(new_node.input): if input_node ==",
"in enumerate(new_node.input): if input_node == value.name: new_node.input[i] = value.input[0] result_graph_def.node.extend([new_node])",
"(the \"License\"); # you may not use this file except",
"# you may not use this file except in compliance",
"= m.group(1) return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean up",
"= node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to",
"for fused MatMul which starts with '_' FUSED_MATMUL = '_FusedMatMul'",
"tensor_util # Custom op name for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D",
"that has the values we want to access. Returns: Numpy",
"from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework",
"# # Unless required by applicable law or agreed to",
"continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in inputs_to_remove: for",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"op name for fused MatMul which starts with '_' FUSED_MATMUL",
"gamma after normalization. def scale_after_normalization(node): if node.op == \"BatchNormWithGlobalNormalization\": return",
"Version 2.0 (the \"License\"); # you may not use this",
"import re from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2",
"name for every node. name: Identifies the node we want",
"the given name. Raises: ValueError: If the node isn't present",
"numpy ndarray. Args: node_def: Const NodeDef that has the values",
"implied. # See the License for the specific language governing",
"the dictionary. \"\"\" stripped_name = node_name_from_input(name) if stripped_name not in",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"= node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in inputs_to_remove: for i, input_node",
"ndarray containing the values. Raises: ValueError: If the node isn't",
"Args: node_def: Const NodeDef that has the values we want",
"other decorations to get the underlying node name.\"\"\" if node_name.startswith(\"^\"):",
"Args: input_graph_def: GraphDef object to be cleaned. node_to_skip: Dict with",
"the values from a const NodeDef as a numpy ndarray.",
"nodes with inputs that have been removed. Args: input_graph_def: GraphDef",
"by applicable law or agreed to in writing, software #",
"# limitations under the License. # ============================================================================== import re from",
"of nodes to be removed from inputs of all nodes.",
"in input_graph_def.node: if node.name in nodes_to_skip: continue new_node = node_def_pb2.NodeDef()",
"m: node_name = m.group(1) return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove):",
"\"\"\" stripped_name = node_name_from_input(name) if stripped_name not in node_map: raise",
"nodes and clean up the nodes with inputs that have",
"find. Returns: NodeDef of the node with the given name.",
"Raises: ValueError: If the node isn't a Const. \"\"\" if",
"that has been cleaned. \"\"\" result_graph_def = graph_pb2.GraphDef() for node",
"to be cleaned. node_to_skip: Dict with node names to be",
"Copyright 2019 Google LLC # # Licensed under the Apache",
"for i, input_node in enumerate(new_node.input): if input_node == value.name: new_node.input[i]",
"FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map, name): \"\"\"Pulls a node def",
"import node_def_pb2 from tensorflow.python.framework import tensor_util # Custom op name",
"skipped nodes and clean up the nodes with inputs that",
"op name for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' #",
"to find. Returns: NodeDef of the node with the given",
"dictionary for a given name. Args: node_map: Dictionary containing an",
"of all nodes. Returns: GraphDef that has been cleaned. \"\"\"",
"input_node in enumerate(new_node.input): if input_node == value.name: new_node.input[i] = value.input[0]",
"def values_from_const(node_def): \"\"\"Extracts the values from a const NodeDef as",
"node_name_from_input(name) if stripped_name not in node_map: raise ValueError(\"No node named",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The grappler",
"Unless required by applicable law or agreed to in writing,",
"node_def.name) input_tensor = node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value #",
"the specific language governing permissions and # limitations under the",
"an entry indexed by name for every node. name: Identifies",
"def scale_after_normalization(node): if node.op == \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b return True",
"inputs_to_remove): \"\"\"Clean up the graph def by removing the skipped",
"applicable law or agreed to in writing, software # distributed",
"to be skipped. inputs_to_remove: List of nodes to be removed",
"Google LLC # # Licensed under the Apache License, Version",
"in map.\" % name) return node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts the",
"tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.framework import",
"in writing, software # distributed under the License is distributed",
"isn't present in the dictionary. \"\"\" stripped_name = node_name_from_input(name) if",
"grappler op name for fused MatMul which starts with '_'",
"\"\"\" if node_def.op != \"Const\": raise ValueError( \"Node named '%s'",
"we want to access. Returns: Numpy ndarray containing the values.",
"from a dictionary for a given name. Args: node_map: Dictionary",
"for every node. name: Identifies the node we want to",
"from tensorflow.python.framework import tensor_util # Custom op name for fused",
"= 'FusedDepthwiseConv2dNative' # The grappler op name for fused MatMul",
"node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to scale",
"re from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"inputs that have been removed. Args: input_graph_def: GraphDef object to",
"License, Version 2.0 (the \"License\"); # you may not use",
"the License. # ============================================================================== import re from tensorflow.core.framework import graph_pb2",
"# You may obtain a copy of the License at",
"ndarray. Args: node_def: Const NodeDef that has the values we",
"stripped_name not in node_map: raise ValueError(\"No node named '%s' found",
"get the underlying node name.\"\"\" if node_name.startswith(\"^\"): node_name = node_name[1:]",
"If the node isn't a Const. \"\"\" if node_def.op !=",
"be removed from inputs of all nodes. Returns: GraphDef that",
"want to access. Returns: Numpy ndarray containing the values. Raises:",
"% node_def.name) input_tensor = node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return tensor_value",
"============================================================================== import re from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"has been cleaned. \"\"\" result_graph_def = graph_pb2.GraphDef() for node in",
"for value in inputs_to_remove: for i, input_node in enumerate(new_node.input): if",
"!= \"Const\": raise ValueError( \"Node named '%s' should be a",
"= graph_pb2.GraphDef() for node in input_graph_def.node: if node.name in nodes_to_skip:",
"Returns: NodeDef of the node with the given name. Raises:",
"NodeDef that has the values we want to access. Returns:",
"<gh_stars>0 # Copyright 2019 Google LLC # # Licensed under",
"given name. Args: node_map: Dictionary containing an entry indexed by",
"the License for the specific language governing permissions and #",
"ValueError: If the node isn't present in the dictionary. \"\"\"",
"a const NodeDef as a numpy ndarray. Args: node_def: Const",
"node_map: Dictionary containing an entry indexed by name for every",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"up the graph def by removing the skipped nodes and",
"either express or implied. # See the License for the",
"the nodes with inputs that have been removed. Args: input_graph_def:",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"access. Returns: Numpy ndarray containing the values. Raises: ValueError: If",
"raise ValueError( \"Node named '%s' should be a Const op",
"ValueError: If the node isn't a Const. \"\"\" if node_def.op",
"under the License. # ============================================================================== import re from tensorflow.core.framework import",
"clean up the nodes with inputs that have been removed.",
"which starts with '_' FUSED_MATMUL = '_FusedMatMul' def node_from_map(node_map, name):",
"cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean up the graph def by removing",
"ValueError(\"No node named '%s' found in map.\" % name) return",
"ValueError( \"Node named '%s' should be a Const op for",
"found in map.\" % name) return node_map[stripped_name] def values_from_const(node_def): \"\"\"Extracts",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"return node_name def cleanup_graph_def(input_graph_def, nodes_to_skip, inputs_to_remove): \"\"\"Clean up the graph",
"= tensor_util.MakeNdarray(input_tensor) return tensor_value # Whether to scale by gamma",
"with the given name. Raises: ValueError: If the node isn't",
"\"\"\"Pulls a node def from a dictionary for a given",
"by removing the skipped nodes and clean up the nodes",
"The grappler op name for fused MatMul which starts with",
"Returns: GraphDef that has been cleaned. \"\"\" result_graph_def = graph_pb2.GraphDef()",
"Numpy ndarray containing the values. Raises: ValueError: If the node",
"node.name in nodes_to_skip: continue new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for value",
"new_node = node_def_pb2.NodeDef() new_node.CopyFrom(node) for value in inputs_to_remove: for i,",
"named '%s' should be a Const op for values_from_const.\" %",
"\"License\"); # you may not use this file except in",
"given name. Raises: ValueError: If the node isn't present in",
"m = re.search(r\"(.*):\\d+$\", node_name) if m: node_name = m.group(1) return",
"has the values we want to access. Returns: Numpy ndarray",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"# Copyright 2019 Google LLC # # Licensed under the",
"and clean up the nodes with inputs that have been",
"after normalization. def scale_after_normalization(node): if node.op == \"BatchNormWithGlobalNormalization\": return node.attr[\"scale_after_normalization\"].b",
"a given name. Args: node_map: Dictionary containing an entry indexed",
"the node we want to find. Returns: NodeDef of the",
"# distributed under the License is distributed on an \"AS",
"Args: node_map: Dictionary containing an entry indexed by name for",
"and # limitations under the License. # ============================================================================== import re",
"# Unless required by applicable law or agreed to in",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"You may obtain a copy of the License at #",
"GraphDef that has been cleaned. \"\"\" result_graph_def = graph_pb2.GraphDef() for",
"name for fused depthwise conv2d FUSED_DEPTHWISE_CONV2D = 'FusedDepthwiseConv2dNative' # The",
"\"\"\"Clean up the graph def by removing the skipped nodes",
"a node def from a dictionary for a given name.",
"node_map: raise ValueError(\"No node named '%s' found in map.\" %",
"name: Identifies the node we want to find. Returns: NodeDef",
"be cleaned. node_to_skip: Dict with node names to be skipped.",
"decorations to get the underlying node name.\"\"\" if node_name.startswith(\"^\"): node_name",
"node_def.op != \"Const\": raise ValueError( \"Node named '%s' should be",
"to be removed from inputs of all nodes. Returns: GraphDef",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"values_from_const.\" % node_def.name) input_tensor = node_def.attr[\"value\"].tensor tensor_value = tensor_util.MakeNdarray(input_tensor) return",
"NodeDef of the node with the given name. Raises: ValueError:",
"Const op for values_from_const.\" % node_def.name) input_tensor = node_def.attr[\"value\"].tensor tensor_value",
"object to be cleaned. node_to_skip: Dict with node names to",
"to scale by gamma after normalization. def scale_after_normalization(node): if node.op",
"node. name: Identifies the node we want to find. Returns:"
] |
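
To show how these helpers fit together, the following is a small usage sketch; it is not part of the original file. The tiny GraphDef, the node names, and the way the results are printed are illustrative assumptions; the sketch only needs the definitions above and an installed TensorFlow.

# Illustrative sketch only; assumes node_from_map and cleanup_graph_def
# (defined above) are in scope. Node names here are made up.
from tensorflow.core.framework import graph_pb2

graph_def = graph_pb2.GraphDef()

const_node = graph_def.node.add()
const_node.op = 'Const'
const_node.name = 'weights'

identity_node = graph_def.node.add()
identity_node.op = 'Identity'
identity_node.name = 'weights/read'
identity_node.input.extend(['weights'])

matmul_node = graph_def.node.add()
matmul_node.op = 'MatMul'
matmul_node.name = 'matmul'
matmul_node.input.extend(['input:0', 'weights/read'])

# Index every node by name, then look one up through a tensor name with a port.
node_map = {node.name: node for node in graph_def.node}
print(node_from_map(node_map, 'weights/read:0').op)  # -> Identity

# Skip the Identity node and rewire its consumers to read 'weights' directly.
cleaned = cleanup_graph_def(graph_def,
                            nodes_to_skip={'weights/read': True},
                            inputs_to_remove=[identity_node])
print([n.name for n in cleaned.node])  # -> ['weights', 'matmul']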

<filename>loss_fn/classification_loss_fns/binary_cross_entropy.py
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#

from torch.nn import functional as F
from torch import Tensor
import argparse

from . import register_classification_loss_fn
from .. import BaseCriteria


@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
    """Binary CE for classification tasks"""

    def __init__(self, opts, *args, **kwargs) -> None:
        super().__init__()

    def forward(
        self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
    ) -> Tensor:
        if target.dim() != prediction.dim():
            target = F.one_hot(target, num_classes=prediction.shape[-1])

        return F.binary_cross_entropy_with_logits(
            input=prediction,
            target=target.to(prediction.dtype),
            weight=None,
            reduction="sum",
        )

    def __repr__(self) -> str:
        return "{}()".format(self.__class__.__name__)
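
For reference, the forward pass above reduces to the following self-contained snippet. This is a sketch rather than part of the original file: it assumes only that torch is installed, and the opts argument and the loss-function registry are not needed to reproduce the computation. The tensor shapes are arbitrary example values.

# Standalone sketch of the loss computed by ClsBinaryCrossEntropy.forward.
import torch
from torch.nn import functional as F

prediction = torch.randn(4, 10)        # logits for 4 samples, 10 classes (example shape)
target = torch.randint(0, 10, (4,))    # integer class labels

# Same dimensionality check and one-hot conversion as the class above.
if target.dim() != prediction.dim():
    target = F.one_hot(target, num_classes=prediction.shape[-1])

loss = F.binary_cross_entropy_with_logits(
    input=prediction,
    target=target.to(prediction.dtype),
    weight=None,
    reduction="sum",
)
print(loss)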

<filename>Sorting/insertion_sort.py
""" Insertion Sort Algorithm:"""
"""Implementation"""


def insertion_sort(arr) -> list:
    n = len(arr)
    for i in range(1, n):
        swap_index = i
        for j in range(i-1, -1, -1):
            if arr[swap_index] < arr[j]:
                arr[swap_index], arr[j] = arr[j], arr[swap_index]
                swap_index -= 1
            else:
                break
    return arr


def main():
    arr_input = [10, 5, 30, 1, 2, 5, 10, 10]
    a2 = insertion_sort(arr_input)
    print(a2)


# Using the special variable
# __name__
if __name__ == "__main__":
    main()
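
A short sanity check can be useful when experimenting with this function. It is not part of the original file and assumes insertion_sort as defined above is in scope; the sample size and value range are arbitrary.

# Hedged sanity check: compare insertion_sort against Python's built-in sorted().
import random

data = random.sample(range(100), 10)
assert insertion_sort(list(data)) == sorted(data)
print("insertion_sort agrees with sorted() on", data)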

<gh_stars>1-10
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""

from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
                   scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
                         Coregister, Normalize, Normalize12, Segment, Smooth,
                         NewSegment, DARTEL, DARTELNorm2MNI, CreateWarped,
                         VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
                    OneSampleTTestDesign, TwoSampleTTestDesign,
                    PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
                    ApplyInverseDeformation, ResliceToReference, DicomImport)
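
A minimal usage sketch follows, under the assumption that this __init__ module is nipype.interfaces.spm and that nipype is installed. It only inspects a few of the re-exported names and does not run anything in SPM or MATLAB.

# Purely introspective sketch; nothing is executed in SPM.
from nipype.interfaces import spm

print(spm.Info)          # SPM installation/version helper from .base
print(spm.Realign)       # preprocessing interface re-exported from .preprocess
print(spm.Level1Design)  # model-specification interface re-exported from .model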

<reponame>tobloef/neural-network<filename>network.py
import numpy as np
from mathUtils import *


class Network(object):
    """ Model for a feedforward neural network. """

    def __init__(self, layerSizes, biasVectors, weightMatrices):
        """ Initialise the network with a list of layer sizes and lists for biases and weights for the neurons in the network. The first layer is the input layer and the last layer is the output layer. """
        self.layerSizes = layerSizes
        self.biasVectors = biasVectors
        self.weightMatrices = weightMatrices

    @staticmethod
    def generateRandomNetwork(layerSizes):
        """ Initialise a new network with random weights and biases. Input and output layers are included in the layerSizes list. The random weights and biases are generated using a Gaussian distribution, so the results are more probable to be around 0. """
        biasVectors = []
        """Generate biases for each neuron in each layer, except the input layer."""
        for size in layerSizes[1:]:
            """ np.random.randn generates arrays of arrays of random numbers, based on the parameters. np.random.randn(3,2) will generate an array of 3 arrays with 2 random numbers. """
            biasVectors.append(np.random.randn(size, 1))
        """Generate weights for connections between layers."""
        weightMatrices = []
        for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]):
            weightMatrices.append(np.random.randn(prevSize, size))
        return Network(layerSizes, biasVectors, weightMatrices)

    def getOutputs(self, inputs):
        """Return a vector of the network's outputs based on the given inputs, using feedforward."""
        activations = inputs
        for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
            """ For every layer, get the bias vector and the weight matrix. Then get dot product between the weight matrix and the output vector and add the bias vector. This is the activation vector for the current layer. """
            zVector = np.dot(weightMatrix, activations) + biasVector
            activations = sigmoid(zVector)
        return activations

    def train(self, data, epochs, batchSize, rate, testData=None):
        """ Train the neural network using stochastic gradient descent. Smaller batches of random samples from the training data are used to reduce the training time. The training data is a list of tuples (inputs, expected outputs). The learning rate is how much to change the values each batch. """
        print("Training network with shape {}, batch size {} and learning rate {} for {} epochs...".format(self.layerSizes, batchSize, rate, epochs))
        for e in range(epochs):
            np.random.shuffle(data)
            batches = []
            for i in range(0, len(data), batchSize):
                batches.append(data[i:i+batchSize])
            for batch in batches:
                self._tuneNetwork(batch, rate)
            if (testData):
                result = self._evaluate(testData)
                print("Epoch #{} completed with {:.2f}% correctness.".format(e+1, 100/len(testData)*result))
            else:
                print("Epoch #{} completed.".format(e))

    def _tuneNetwork(self, batch, rate):
        """ Tune the weights and biases of the network by using backpropagation with gradient descent. """
        """ Setup matrix and vector based on the weight matrix and bias vector filled with zeroes. This is used for storing each change to make for each vector, for each set of training data. """
        sumBiasVectors = []
        for biasVector in self.biasVectors:
            sumBiasVectors.append(np.zeros(biasVector.shape))
        sumWeightMatrices = []
        for weightMatrix in self.weightMatrices:
            sumWeightMatrices.append(np.zeros(weightMatrix.shape))
        for inputs, expected in batch:
            """ Get a matrix/vector with the required changes to the network, based on that set of training data, and add it to a set of matrix/vector totalling the changes needed from all the training data. """
            deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected)
            newSumBiasVectors = []
            for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors):
                newSumBiasVectors.append(totalBiasVector + deltaBiasVector)
            sumBiasVectors = newSumBiasVectors
            newSumWeightMatrices = []
            for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices):
                newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix)
            sumWeightMatrices = newSumWeightMatrices
        """ Take each change for each set of training data, get the average of these and subtract them from the current weights and biases. Then use these as the new weights and biases. """
        newBiasVectors = []
        for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors):
            newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector)
        newWeightMatrices = []
        for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices):
            newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix)
        self.biasVectors = newBiasVectors
        self.weightMatrices = newWeightMatrices

    def _backpropagate(self, inputs, expected):
        """ Return a tuple with gradient of the cost function for each bias and weight, in the format (vector of bias changes, matrix of weight changes), for the specified set of training data. """
        deltaBiasVectors = []
        for biasVector in self.biasVectors:
            deltaBiasVectors.append(np.zeros(biasVector.shape))
        deltaWeightMatrices = []
        for weightMatrix in self.weightMatrices:
            deltaWeightMatrices.append(np.zeros(weightMatrix.shape))
        """Store all activations for the entire network, starting with the input layer."""
        activationVector = inputs
        activationVectors = [inputs]
        """Find the z-vector for layer in the network"""
        zVectors = []
        for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):
            zVector = np.dot(weightMatrix, activationVector) + biasVector
            zVectors.append(zVector)
            activationVector = sigmoid(zVector)
            activationVectors.append(activationVector)
        """ * Start with output compared to expected, tune weights and biases based on the derivative of the cost function with respect to the weight/bias. * Then [...] the input layer. """
        deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1])
        deltaBiasVectors[-1] = deltaBiasVector
        deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose())
        for l in range(-2, -len(self.layerSizes), -1):
            # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead
            weightMatrix = self.weightMatrices[l+1].transpose()
            sigmoidDeriv = sigmoidDerivative(zVectors[l])
            deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv
            deltaBiasVectors[l] = deltaBiasVector
            deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose())
        return (deltaBiasVectors, deltaWeightMatrices)

    def _evaluate(self, testData):
        """Test the network with the specified test data and return the number of correct guesses."""
        correctGuesses = 0
        for inputs, expected in testData:
            """Increment correct guesses if the most active output is the expected one."""
            outputs = self.getOutputs(inputs)
            guess = np.argmax(outputs)
            if (guess == expected):
                correctGuesses += 1
        return correctGuesses
"(activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector",
"in zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector)",
"of correct guesses.\"\"\" correctGuesses = 0 for inputs, expected in",
"from the training are used to reduce the training time.",
"weightMatrices): \"\"\" Initialise the network with a list of layer",
"and biases are generated using a Gaussian distribution, so the",
"and the weight matrix. Then get dot product between the",
"on that set of training data, and add it to",
"= sigmoid(zVector) activationVectors.append(activationVector) \"\"\" * Start with output compared to",
"rate) if (testData): result = self._evaluate(testData) print(\"Epoch #{} completed with",
"weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in batch: \"\"\"",
"the weight matrix. Then get dot product between the weight",
"based on the weight matrix and bias vector filled with",
"data, and add it to a set of matrix/vector totalling",
"a set of matrix/vector totalling the changes needed from all",
"paramters. np.random.randn(3,2) will generate an array of 3 arrays with",
"of random numbers, based on the paramters. np.random.randn(3,2) will generate",
"network, starting with the input layer.\"\"\" activationVector = inputs activationVectors",
"for e in range(epochs): np.random.shuffle(data) batches = [] for i",
"and weight, in the format (vector of bias changes, matrix",
"batch size {} and learning rate {} for {} epochs...\".format(self.layerSizes,",
"layer.\"\"\" for size in layerSizes[1:]: \"\"\" np.random.randn generates arrays of",
"import * class Network(object): \"\"\" Model for a feedforward Neural",
"weights for connections between layers.\"\"\" weightMatrices = [] for size,",
"for biases and weights for the neurons in the network.",
"outputs = self.getOutputs(inputs) guess = np.argmax(outputs) if (guess == expected):",
"data and return the number of correct guesses.\"\"\" correctGuesses =",
"batchSize, rate, epochs)) for e in range(epochs): np.random.shuffle(data) batches =",
"[] \"\"\"Generate biases for each neuron in each layer, except",
"based on that set of training data, and add it",
"biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector)",
"= [] for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector +",
"[] for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in",
"print(\"Training network with shape {}, batch size {} and learning",
"it to a set of matrix/vector totalling the changes needed",
"deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors = [] for totalBiasVector,",
"-len(self.layerSizes), -1): # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is *",
"Smaller batches of random samples from the training are used",
"training data, get the average of these and subtract them",
"weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all activations for the entire",
"values each batch. \"\"\" print(\"Training network with shape {}, batch",
"deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices \"\"\" Take each",
"for batch in batches: self._tuneNetwork(batch, rate) if (testData): result =",
"layer in the network\"\"\" zVectors = [] for biasVector, weightMatrix",
"of 3 arrays with 2 random numbers. \"\"\" biasVectors.append(np.random.randn(size, 1))",
"in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices",
"the input layer and the last layer is the output",
"outputs based on the given inputs, using feedforward.\"\"\" activations =",
"Setup matrix and vector based on the weight matrix and",
"\"\"\"Generate biases for each neuron in each layer, except the",
"for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors",
"reduce the training time. The training date is a list",
"between layers.\"\"\" weightMatrices = [] for size, prevSize in zip(layerSizes[:-1],",
"current layer. \"\"\" zVector = np.dot(weightMatrix, activations) + biasVector activations",
"each layer, except the input layer.\"\"\" for size in layerSizes[1:]:",
"for each vector, for each set of training date. \"\"\"",
"for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in batch:",
"self.weightMatrices): zVector = np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector =",
"activation vector for the current layer. \"\"\" zVector = np.dot(weightMatrix,",
"matrix and the output vector and add the bias vector.",
"data, epochs, batchSize, rate, testData=None): \"\"\" Train the neural network",
"the weight/bias. * Then move onto each hidden layer and",
"of tuples (inputs, expected outputs). The learning rate is how",
"\"\"\" sumBiasVectors = [] for biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices",
"guesses if the most active output is the expected one.\"\"\"",
"correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch #{} completed.\".format(e)) def _tuneNetwork(self, batch, rate):",
"Then use these as the new weights and biases. \"\"\"",
"biasVectors self.weightMatrices = weightMatrices @staticmethod def generateRandomNetwork(layerSizes): \"\"\" Initialise a",
"in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector) newWeightMatrices =",
"respect to the weight/bias. * Then move onto each hidden",
"def __init__(self, layerSizes, biasVectors, weightMatrices): \"\"\" Initialise the network with",
"backpropagation with gradient descend. \"\"\" \"\"\" Setup matrix and vector",
"is the expected one.\"\"\" outputs = self.getOutputs(inputs) guess = np.argmax(outputs)",
"network using stochastic gradient descent. Smaller batches of random samples",
"network\"\"\" zVectors = [] for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):",
"new network with random weights and biases. Input and output",
"arrays of random numbers, based on the paramters. np.random.randn(3,2) will",
"for each set of training data, get the average of",
"sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices = [] for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for",
"derivative of the cost function with respect to the weight/bias.",
"format (vector of bias changes, matrix of weight changes), for",
"layerSizes list. The random weights and biases are generated using",
"output vector and add the bias vector. This is the",
"the z-vector for layer in the network\"\"\" zVectors = []",
"if the most active output is the expected one.\"\"\" outputs",
"for a feedforward Neural Network that use backpropagation with stochastic",
"of the cost function for each bias and weight, in",
"newSumBiasVectors newSumWeightMatrices = [] for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices):",
"rate, testData=None): \"\"\" Train the neural network using stochastic gradient",
"= inputs activationVectors = [inputs] \"\"\"Find the z-vector for layer",
"all activations for the entire network, starting with the input",
"for inputs, expected in testData: \"\"\"Increment correct guesses if the",
"Then move onto each hidden layer and the input layer.",
"epochs)) for e in range(epochs): np.random.shuffle(data) batches = [] for",
"training date is a list of tuples (inputs, expected outputs).",
"rate, epochs)) for e in range(epochs): np.random.shuffle(data) batches = []",
"the weight matrix and bias vector filled with zeroes. This",
"100/len(testData)*result)) else: print(\"Epoch #{} completed.\".format(e)) def _tuneNetwork(self, batch, rate): \"\"\"",
"network with shape {}, batch size {} and learning rate",
"most active output is the expected one.\"\"\" outputs = self.getOutputs(inputs)",
"are used to reduce the training time. The training date",
"sumBiasVectors = [] for biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices =",
"neurons in the network. The first layer is the input",
"+ biasVector activations = sigmoid(zVector) return activations def train(self, data,",
"[] for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector)",
"to the network, based on that set of training data,",
"included in the layerSizes list. The random weights and biases",
"feedforward.\"\"\" activations = inputs for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices):",
"= [inputs] \"\"\"Find the z-vector for layer in the network\"\"\"",
"expected outputs). The learning rate is how much to change",
"is the output layer. \"\"\" self.layerSizes = layerSizes self.biasVectors =",
"layer is the output layer. \"\"\" self.layerSizes = layerSizes self.biasVectors",
"are more probable to be around 0. \"\"\" biasVectors =",
"__init__(self, layerSizes, biasVectors, weightMatrices): \"\"\" Initialise the network with a",
"0. \"\"\" biasVectors = [] \"\"\"Generate biases for each neuron",
"correct guesses if the most active output is the expected",
"newSumWeightMatrices \"\"\" Take each change for each set of training",
"for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): \"\"\" For every layer,",
"return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self, testData): \"\"\"Test the network with",
"expected): \"\"\" Return a tuple with gradient of the cost",
"def _backpropagate(self, inputs, expected): \"\"\" Return a tuple with gradient",
"biases are generated using a Gaussian distribution, so the results",
"with zeroes. This is used for storing each change to",
"learning rate {} for {} epochs...\".format(self.layerSizes, batchSize, rate, epochs)) for",
"[] for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch))",
"- expected) * 2 * sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector deltaWeightMatrices[-1]",
"biases and weights for the neurons in the network. The",
"* sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return",
"newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices = [] for",
"\"\"\" Initialise a new network with random weights and biases.",
"def getOutputs(self, inputs): \"\"\"Return a vector of the network's outputs",
"layer sizes and lists for biases and weights for the",
"set of training data, and add it to a set",
"self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = [] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape))",
"{} epochs...\".format(self.layerSizes, batchSize, rate, epochs)) for e in range(epochs): np.random.shuffle(data)",
"# Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead",
"random weights and biases are generated using a Gaussian distribution,",
"and biases. Input and output layers are included in the",
"hidden layer and the input layer. \"\"\" deltaBiasVector = (activationVectors[-1]",
"the new weights and biases. \"\"\" newBiasVectors = [] for",
"totalBiasVector in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector) newWeightMatrices",
"layer and the input layer. \"\"\" deltaBiasVector = (activationVectors[-1] -",
"neural network using stochastic gradient descent. Smaller batches of random",
"dot product between the weight matrix and the output vector",
"tuples (inputs, expected outputs). The learning rate is how much",
"with respect to the weight/bias. * Then move onto each",
"(rate/len(batch)) * totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices def",
"expected) newSumBiasVectors = [] for totalBiasVector, deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors):",
"output layer. \"\"\" self.layerSizes = layerSizes self.biasVectors = biasVectors self.weightMatrices",
"list of layer sizes and lists for biases and weights",
"size, prevSize in zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size)) return Network(layerSizes, biasVectors,",
"set of training date. \"\"\" sumBiasVectors = [] for biasVector",
"bias vector filled with zeroes. This is used for storing",
"weights and biases of the network by using backpropagation with",
"training date. \"\"\" sumBiasVectors = [] for biasVector in self.biasVectors:",
"zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector - (rate/len(batch)) * totalBiasVector) newWeightMatrices = []",
"the network by using backpropagation with gradient descend. \"\"\" \"\"\"",
"\"\"\" newBiasVectors = [] for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors):",
"in the format (vector of bias changes, matrix of weight",
"= [] for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected",
"zVector = np.dot(weightMatrix, activations) + biasVector activations = sigmoid(zVector) return",
"between the weight matrix and the output vector and add",
"to reduce the training time. The training date is a",
"totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices =",
"will generate an array of 3 arrays with 2 random",
"neuron in each layer, except the input layer.\"\"\" for size",
"each set of training date. \"\"\" sumBiasVectors = [] for",
"the cost function for each bias and weight, in the",
"* sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose()) for",
"= deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def",
"with gradient of the cost function for each bias and",
"weight/bias. * Then move onto each hidden layer and the",
"Network(layerSizes, biasVectors, weightMatrices) def getOutputs(self, inputs): \"\"\"Return a vector of",
"weights and biases are generated using a Gaussian distribution, so",
"weights and biases. \"\"\" newBiasVectors = [] for biasVector, totalBiasVector",
"batch. \"\"\" print(\"Training network with shape {}, batch size {}",
"first layer is the input layer and the last layer",
"for the current layer. \"\"\" zVector = np.dot(weightMatrix, activations) +",
"used for storing each change to make for each vector,",
"inputs): \"\"\"Return a vector of the network's outputs based on",
"the training time. The training date is a list of",
"for layer in the network\"\"\" zVectors = [] for biasVector,",
"print(\"Epoch #{} completed.\".format(e)) def _tuneNetwork(self, batch, rate): \"\"\" Tune the",
"network with random weights and biases. Input and output layers",
"deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices = [] for totalWeightMatrix, deltaWeightMatrix",
"guesses.\"\"\" correctGuesses = 0 for inputs, expected in testData: \"\"\"Increment",
"array of 3 arrays with 2 random numbers. \"\"\" biasVectors.append(np.random.randn(size,",
"deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices",
"each vector, for each set of training date. \"\"\" sumBiasVectors",
"stochastic gradient descent. Smaller batches of random samples from the",
"to be around 0. \"\"\" biasVectors = [] \"\"\"Generate biases",
"zip(self.biasVectors, self.weightMatrices): \"\"\" For every layer, get the bias vector",
"biasVector in self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = [] for weightMatrix in",
"testData: \"\"\"Increment correct guesses if the most active output is",
"each neuron in each layer, except the input layer.\"\"\" for",
"the specified set of training data. \"\"\" deltaBiasVectors = []",
"weight matrix and bias vector filled with zeroes. This is",
"in batch: \"\"\" Get a matrix/vector with the required changes",
"deltaBiasVector in zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors",
"for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix, activationVector)",
"inputs, expected in batch: \"\"\" Get a matrix/vector with the",
"np.random.randn(3,2) will generate an array of 3 arrays with 2",
"self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in batch: \"\"\" Get a",
"layers are included in the layerSizes list. The random weights",
"network by using backpropagation with gradient descend. \"\"\" \"\"\" Setup",
"zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix) self.biasVectors = newBiasVectors",
"= [] for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix -",
"activationVectors = [inputs] \"\"\"Find the z-vector for layer in the",
"biases. \"\"\" newBiasVectors = [] for biasVector, totalBiasVector in zip(self.biasVectors,",
"newWeightMatrices = [] for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix",
"to expected, tune weights and biases based on the derivative",
"make for each vector, for each set of training date.",
"use backpropagation with stochastic gradient decent. \"\"\" def __init__(self, layerSizes,",
"batches: self._tuneNetwork(batch, rate) if (testData): result = self._evaluate(testData) print(\"Epoch #{}",
"batch, rate): \"\"\" Tune the weights and biases of the",
"and the input layer. \"\"\" deltaBiasVector = (activationVectors[-1] - expected)",
"= np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self, testData): \"\"\"Test",
"The random weights and biases are generated using a Gaussian",
"descent. Smaller batches of random samples from the training are",
"vector based on the weight matrix and bias vector filled",
"deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self, testData):",
"= np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l in range(-2, -len(self.layerSizes), -1): #",
"each change to make for each vector, for each set",
"biasVectors = [] \"\"\"Generate biases for each neuron in each",
"vector. This is the activation vector for the current layer.",
"getOutputs(self, inputs): \"\"\"Return a vector of the network's outputs based",
"and biases based on the derivative of the cost function",
"is * 1 instead weightMatrix = self.weightMatrices[l+1].transpose() sigmoidDeriv = sigmoidDerivative(zVectors[l])",
"decent. \"\"\" def __init__(self, layerSizes, biasVectors, weightMatrices): \"\"\" Initialise the",
"newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices =",
"every layer, get the bias vector and the weight matrix.",
"in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all activations for the entire network,",
"the output vector and add the bias vector. This is",
"training data, and add it to a set of matrix/vector",
"the average of these and subtract them from the current",
"expected, tune weights and biases based on the derivative of",
"random weights and biases. Input and output layers are included",
"range(epochs): np.random.shuffle(data) batches = [] for i in range(0, len(data),",
"The first layer is the input layer and the last",
"gradient descent. Smaller batches of random samples from the training",
"\"\"\" * Start with output compared to expected, tune weights",
"vector filled with zeroes. This is used for storing each",
"deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices)",
"layer is the input layer and the last layer is",
"the network\"\"\" zVectors = [] for biasVector, weightMatrix in zip(self.biasVectors,",
"arrays of arrays of random numbers, based on the paramters.",
"= (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] =",
"shape {}, batch size {} and learning rate {} for",
"\"\"\"Return a vector of the network's outputs based on the",
"guess = np.argmax(outputs) if (guess == expected): correctGuesses += 1",
"more probable to be around 0. \"\"\" biasVectors = []",
"the current weights and biases. Then use these as the",
"the training are used to reduce the training time. The",
"Gaussian distribution, so the results are more probable to be",
"activationVector = inputs activationVectors = [inputs] \"\"\"Find the z-vector for",
"e in range(epochs): np.random.shuffle(data) batches = [] for i in",
"learning rate is how much to change the values each",
"= sigmoid(zVector) return activations def train(self, data, epochs, batchSize, rate,",
"of the cost function with respect to the weight/bias. *",
"changes to the network, based on that set of training",
"= [] for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix +",
"\"\"\" Train the neural network using stochastic gradient descent. Smaller",
"sigmoid(zVector) return activations def train(self, data, epochs, batchSize, rate, testData=None):",
"biases for each neuron in each layer, except the input",
"inputs, expected in testData: \"\"\"Increment correct guesses if the most",
"specified set of training data. \"\"\" deltaBiasVectors = [] for",
"_backpropagate(self, inputs, expected): \"\"\" Return a tuple with gradient of",
"[] for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix,",
"network with the specified test data and return the number",
"rate): \"\"\" Tune the weights and biases of the network",
"sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs, expected in batch: \"\"\" Get a matrix/vector",
"Return a tuple with gradient of the cost function for",
"newBiasVectors = [] for biasVector, totalBiasVector in zip(self.biasVectors, sumBiasVectors): newBiasVectors.append(biasVector",
"training data. \"\"\" deltaBiasVectors = [] for biasVector in self.biasVectors:",
"size)) return Network(layerSizes, biasVectors, weightMatrices) def getOutputs(self, inputs): \"\"\"Return a",
"self.weightMatrices = weightMatrices @staticmethod def generateRandomNetwork(layerSizes): \"\"\" Initialise a new",
"zVector = np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector = sigmoid(zVector)",
"is used for storing each change to make for each",
"deltaWeightMatrices) def _evaluate(self, testData): \"\"\"Test the network with the specified",
"input layer. \"\"\" deltaBiasVector = (activationVectors[-1] - expected) * 2",
"activations) + biasVector activations = sigmoid(zVector) return activations def train(self,",
"\"\"\"Store all activations for the entire network, starting with the",
"bias vector. This is the activation vector for the current",
"2 random numbers. \"\"\" biasVectors.append(np.random.randn(size, 1)) \"\"\"Generate weights for connections",
"matrix. Then get dot product between the weight matrix and",
"rate is how much to change the values each batch.",
"deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = [] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store",
"number of correct guesses.\"\"\" correctGuesses = 0 for inputs, expected",
"\"\"\" np.random.randn generates arrays of arrays of random numbers, based",
"* class Network(object): \"\"\" Model for a feedforward Neural Network",
"with the required changes to the network, based on that",
"else: print(\"Epoch #{} completed.\".format(e)) def _tuneNetwork(self, batch, rate): \"\"\" Tune",
"= newSumWeightMatrices \"\"\" Take each change for each set of",
"get the average of these and subtract them from the",
"and learning rate {} for {} epochs...\".format(self.layerSizes, batchSize, rate, epochs))",
"Model for a feedforward Neural Network that use backpropagation with",
"the network's outputs based on the given inputs, using feedforward.\"\"\"",
"batchSize, rate, testData=None): \"\"\" Train the neural network using stochastic",
"= self._evaluate(testData) print(\"Epoch #{} completed with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else:",
"= newWeightMatrices def _backpropagate(self, inputs, expected): \"\"\" Return a tuple",
"np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector = sigmoid(zVector) activationVectors.append(activationVector) \"\"\"",
"self.layerSizes = layerSizes self.biasVectors = biasVectors self.weightMatrices = weightMatrices @staticmethod",
"in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices = [] for weightMatrix in self.weightMatrices:",
"zip(sumBiasVectors, deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices =",
"= self.getOutputs(inputs) guess = np.argmax(outputs) if (guess == expected): correctGuesses",
"a new network with random weights and biases. Input and",
"output is the expected one.\"\"\" outputs = self.getOutputs(inputs) guess =",
"a vector of the network's outputs based on the given",
"\"\"\" \"\"\" Setup matrix and vector based on the weight",
"\"\"\"Increment correct guesses if the most active output is the",
"This is used for storing each change to make for",
"[] for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix)",
"_tuneNetwork(self, batch, rate): \"\"\" Tune the weights and biases of",
"an array of 3 arrays with 2 random numbers. \"\"\"",
"for i in range(0, len(data), batchSize): batches.append(data[i:i+batchSize]) for batch in",
"in the network. The first layer is the input layer",
"in batches: self._tuneNetwork(batch, rate) if (testData): result = self._evaluate(testData) print(\"Epoch",
"= [] for i in range(0, len(data), batchSize): batches.append(data[i:i+batchSize]) for",
"layer, get the bias vector and the weight matrix. Then",
"input layer and the last layer is the output layer.",
"layer. \"\"\" deltaBiasVector = (activationVectors[-1] - expected) * 2 *",
"all the training data. \"\"\" deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected)",
"is the input layer and the last layer is the",
"Tune the weights and biases of the network by using",
"[inputs] \"\"\"Find the z-vector for layer in the network\"\"\" zVectors",
"instead weightMatrix = self.weightMatrices[l+1].transpose() sigmoidDeriv = sigmoidDerivative(zVectors[l]) deltaBiasVector = np.dot(weightMatrix,",
"weight matrix and the output vector and add the bias",
"biases. Then use these as the new weights and biases.",
"the given inputs, using feedforward.\"\"\" activations = inputs for biasVector,",
"set of matrix/vector totalling the changes needed from all the",
"on the derivative of the cost function with respect to",
"changes, matrix of weight changes), for the specified set of",
"and the last layer is the output layer. \"\"\" self.layerSizes",
"self.weightMatrices): \"\"\" For every layer, get the bias vector and",
"of matrix/vector totalling the changes needed from all the training",
"that use backpropagation with stochastic gradient decent. \"\"\" def __init__(self,",
"zeroes. This is used for storing each change to make",
"for biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices = [] for weightMatrix",
"required changes to the network, based on that set of",
"Start with output compared to expected, tune weights and biases",
"https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead weightMatrix = self.weightMatrices[l+1].transpose()",
"the layerSizes list. The random weights and biases are generated",
"\"\"\" deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors = [] for",
"samples from the training are used to reduce the training",
"biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix, activationVector) +",
"= np.dot(weightMatrix, activations) + biasVector activations = sigmoid(zVector) return activations",
"in testData: \"\"\"Increment correct guesses if the most active output",
"deltaWeightMatrices = [] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all",
"if (testData): result = self._evaluate(testData) print(\"Epoch #{} completed with {:.2f}%",
"np from mathUtils import * class Network(object): \"\"\" Model for",
"self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all activations for the entire network, starting",
"a matrix/vector with the required changes to the network, based",
"each change for each set of training data, get the",
"the specified test data and return the number of correct",
"that set of training data, and add it to a",
"activationVectors.append(activationVector) \"\"\" * Start with output compared to expected, tune",
"and the output vector and add the bias vector. This",
"cost function for each bias and weight, in the format",
"Network that use backpropagation with stochastic gradient decent. \"\"\" def",
"def _evaluate(self, testData): \"\"\"Test the network with the specified test",
"from all the training data. \"\"\" deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs,",
"move onto each hidden layer and the input layer. \"\"\"",
"#{} completed with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch #{} completed.\".format(e))",
"{:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch #{} completed.\".format(e)) def _tuneNetwork(self, batch,",
"of training data. \"\"\" deltaBiasVectors = [] for biasVector in",
"onto each hidden layer and the input layer. \"\"\" deltaBiasVector",
"the changes needed from all the training data. \"\"\" deltaBiasVectors,",
"and weights for the neurons in the network. The first",
"in self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = [] for weightMatrix in self.weightMatrices:",
"inputs, expected): \"\"\" Return a tuple with gradient of the",
"of bias changes, matrix of weight changes), for the specified",
"+ deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices \"\"\" Take each change for",
"with output compared to expected, tune weights and biases based",
"for each neuron in each layer, except the input layer.\"\"\"",
"using a Gaussian distribution, so the results are more probable",
"biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape)) sumWeightMatrices = [] for weightMatrix in",
"= [] \"\"\"Generate biases for each neuron in each layer,",
"vector, for each set of training date. \"\"\" sumBiasVectors =",
"class Network(object): \"\"\" Model for a feedforward Neural Network that",
"with the input layer.\"\"\" activationVector = inputs activationVectors = [inputs]",
"to make for each vector, for each set of training",
"= [] for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector =",
"data. \"\"\" deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors = []",
"- (rate/len(batch)) * totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices",
"print(\"Epoch #{} completed with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch #{}",
"using backpropagation with gradient descend. \"\"\" \"\"\" Setup matrix and",
"the network with a list of layer sizes and lists",
"sizes and lists for biases and weights for the neurons",
"of weight changes), for the specified set of training data.",
"from mathUtils import * class Network(object): \"\"\" Model for a",
"correctGuesses = 0 for inputs, expected in testData: \"\"\"Increment correct",
"the network. The first layer is the input layer and",
"and vector based on the weight matrix and bias vector",
"\"\"\" print(\"Training network with shape {}, batch size {} and",
"matrix and bias vector filled with zeroes. This is used",
"so the results are more probable to be around 0.",
"output layers are included in the layerSizes list. The random",
"is a list of tuples (inputs, expected outputs). The learning",
"zVectors = [] for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): zVector",
"* Then move onto each hidden layer and the input",
"each bias and weight, in the format (vector of bias",
"Take each change for each set of training data, get",
"a Gaussian distribution, so the results are more probable to",
"based on the given inputs, using feedforward.\"\"\" activations = inputs",
"the expected one.\"\"\" outputs = self.getOutputs(inputs) guess = np.argmax(outputs) if",
"be around 0. \"\"\" biasVectors = [] \"\"\"Generate biases for",
"layerSizes, biasVectors, weightMatrices): \"\"\" Initialise the network with a list",
"= [] for biasVector in self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices = []",
"+ deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices = [] for totalWeightMatrix,",
"needed from all the training data. \"\"\" deltaBiasVectors, deltaWeightMatrices =",
"around 0. \"\"\" biasVectors = [] \"\"\"Generate biases for each",
"range(0, len(data), batchSize): batches.append(data[i:i+batchSize]) for batch in batches: self._tuneNetwork(batch, rate)",
"deltaBiasVectors = [] for biasVector in self.biasVectors: deltaBiasVectors.append(np.zeros(biasVector.shape)) deltaWeightMatrices =",
"totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices def _backpropagate(self, inputs,",
"= self._backpropagate(inputs, expected) newSumBiasVectors = [] for totalBiasVector, deltaBiasVector in",
"these as the new weights and biases. \"\"\" newBiasVectors =",
"storing each change to make for each vector, for each",
"get the bias vector and the weight matrix. Then get",
"with random weights and biases. Input and output layers are",
"_evaluate(self, testData): \"\"\"Test the network with the specified test data",
"of the network by using backpropagation with gradient descend. \"\"\"",
"add it to a set of matrix/vector totalling the changes",
"len(data), batchSize): batches.append(data[i:i+batchSize]) for batch in batches: self._tuneNetwork(batch, rate) if",
"in range(-2, -len(self.layerSizes), -1): # Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector",
"deltaBiasVectors): newSumBiasVectors.append(totalBiasVector + deltaBiasVector) sumBiasVectors = newSumBiasVectors newSumWeightMatrices = []",
"= [] for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size))",
"subtract them from the current weights and biases. Then use",
"- (rate/len(batch)) * totalBiasVector) newWeightMatrices = [] for weightMatrix, totalWeightMatrix",
"with the specified test data and return the number of",
"zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size)) return Network(layerSizes, biasVectors, weightMatrices) def getOutputs(self,",
"return the number of correct guesses.\"\"\" correctGuesses = 0 for",
"sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors,",
"sumWeightMatrices = newSumWeightMatrices \"\"\" Take each change for each set",
"= 0 for inputs, expected in testData: \"\"\"Increment correct guesses",
"for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch)) *",
"weights for the neurons in the network. The first layer",
"set of training data. \"\"\" deltaBiasVectors = [] for biasVector",
"biasVector activations = sigmoid(zVector) return activations def train(self, data, epochs,",
"one.\"\"\" outputs = self.getOutputs(inputs) guess = np.argmax(outputs) if (guess ==",
"l in range(-2, -len(self.layerSizes), -1): # Equivalent to https://i.imgur.com/8PQQ28r.png, because",
"layers.\"\"\" weightMatrices = [] for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]):",
"the training data. \"\"\" deltaBiasVectors, deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors",
"[] for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size)) return",
"[] for weightMatrix, totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch))",
"weights and biases. Input and output layers are included in",
"the derivative of the cost function with respect to the",
"connections between layers.\"\"\" weightMatrices = [] for size, prevSize in",
"\"\"\" Tune the weights and biases of the network by",
"deltaBiasVector is * 1 instead weightMatrix = self.weightMatrices[l+1].transpose() sigmoidDeriv =",
"stochastic gradient decent. \"\"\" def __init__(self, layerSizes, biasVectors, weightMatrices): \"\"\"",
"the network with the specified test data and return the",
"np.argmax(outputs) if (guess == expected): correctGuesses += 1 return correctGuesses",
"layerSizes self.biasVectors = biasVectors self.weightMatrices = weightMatrices @staticmethod def generateRandomNetwork(layerSizes):",
"0 for inputs, expected in testData: \"\"\"Increment correct guesses if",
"add the bias vector. This is the activation vector for",
"np.dot(weightMatrix, activations) + biasVector activations = sigmoid(zVector) return activations def",
"epochs...\".format(self.layerSizes, batchSize, rate, epochs)) for e in range(epochs): np.random.shuffle(data) batches",
"the bias vector and the weight matrix. Then get dot",
"in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix) self.biasVectors =",
"* 1 instead weightMatrix = self.weightMatrices[l+1].transpose() sigmoidDeriv = sigmoidDerivative(zVectors[l]) deltaBiasVector",
"matrix of weight changes), for the specified set of training",
"= sigmoidDerivative(zVectors[l]) deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv deltaBiasVectors[l] =",
"is the activation vector for the current layer. \"\"\" zVector",
"the weight matrix and the output vector and add the",
"sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l",
"used to reduce the training time. The training date is",
"as the new weights and biases. \"\"\" newBiasVectors = []",
"correct guesses.\"\"\" correctGuesses = 0 for inputs, expected in testData:",
"layer and the last layer is the output layer. \"\"\"",
"are included in the layerSizes list. The random weights and",
"return activations def train(self, data, epochs, batchSize, rate, testData=None): \"\"\"",
"Network(object): \"\"\" Model for a feedforward Neural Network that use",
"the weights and biases of the network by using backpropagation",
"biasVector zVectors.append(zVector) activationVector = sigmoid(zVector) activationVectors.append(activationVector) \"\"\" * Start with",
"= np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] =",
"the output layer. \"\"\" self.layerSizes = layerSizes self.biasVectors = biasVectors",
"matrix/vector with the required changes to the network, based on",
"the neurons in the network. The first layer is the",
"Train the neural network using stochastic gradient descent. Smaller batches",
"activationVectors[-2].transpose()) for l in range(-2, -len(self.layerSizes), -1): # Equivalent to",
"@staticmethod def generateRandomNetwork(layerSizes): \"\"\" Initialise a new network with random",
"numpy as np from mathUtils import * class Network(object): \"\"\"",
"\"\"\" zVector = np.dot(weightMatrix, activations) + biasVector activations = sigmoid(zVector)",
"filled with zeroes. This is used for storing each change",
"inputs for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): \"\"\" For every",
"bias changes, matrix of weight changes), for the specified set",
"\"\"\" biasVectors.append(np.random.randn(size, 1)) \"\"\"Generate weights for connections between layers.\"\"\" weightMatrices",
"i in range(0, len(data), batchSize): batches.append(data[i:i+batchSize]) for batch in batches:",
"newWeightMatrices def _backpropagate(self, inputs, expected): \"\"\" Return a tuple with",
"cost function with respect to the weight/bias. * Then move",
"expected in batch: \"\"\" Get a matrix/vector with the required",
"with gradient descend. \"\"\" \"\"\" Setup matrix and vector based",
"(rate/len(batch)) * totalBiasVector) newWeightMatrices = [] for weightMatrix, totalWeightMatrix in",
"feedforward Neural Network that use backpropagation with stochastic gradient decent.",
"inputs activationVectors = [inputs] \"\"\"Find the z-vector for layer in",
"and biases of the network by using backpropagation with gradient",
"self.weightMatrices = newWeightMatrices def _backpropagate(self, inputs, expected): \"\"\" Return a",
"function with respect to the weight/bias. * Then move onto",
"\"\"\"Test the network with the specified test data and return",
"date. \"\"\" sumBiasVectors = [] for biasVector in self.biasVectors: sumBiasVectors.append(np.zeros(biasVector.shape))",
"the format (vector of bias changes, matrix of weight changes),",
"sigmoidDerivative(zVectors[l]) deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector",
"generate an array of 3 arrays with 2 random numbers.",
"biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): \"\"\" For every layer, get",
"input layer.\"\"\" activationVector = inputs activationVectors = [inputs] \"\"\"Find the",
"for size in layerSizes[1:]: \"\"\" np.random.randn generates arrays of arrays",
"= np.argmax(outputs) if (guess == expected): correctGuesses += 1 return",
"{} for {} epochs...\".format(self.layerSizes, batchSize, rate, epochs)) for e in",
"time. The training date is a list of tuples (inputs,",
"each hidden layer and the input layer. \"\"\" deltaBiasVector =",
"for l in range(-2, -len(self.layerSizes), -1): # Equivalent to https://i.imgur.com/8PQQ28r.png,",
"in layerSizes[1:]: \"\"\" np.random.randn generates arrays of arrays of random",
"the results are more probable to be around 0. \"\"\"",
"them from the current weights and biases. Then use these",
"for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all activations for the",
"entire network, starting with the input layer.\"\"\" activationVector = inputs",
"a feedforward Neural Network that use backpropagation with stochastic gradient",
"weightMatrices) def getOutputs(self, inputs): \"\"\"Return a vector of the network's",
"self.weightMatrices[l+1].transpose() sigmoidDeriv = sigmoidDerivative(zVectors[l]) deltaBiasVector = np.dot(weightMatrix, deltaBiasVector) * sigmoidDeriv",
"the network, based on that set of training data, and",
"specified test data and return the number of correct guesses.\"\"\"",
"Input and output layers are included in the layerSizes list.",
"of arrays of random numbers, based on the paramters. np.random.randn(3,2)",
"change for each set of training data, get the average",
"gradient descend. \"\"\" \"\"\" Setup matrix and vector based on",
"biases based on the derivative of the cost function with",
"\"\"\" def __init__(self, layerSizes, biasVectors, weightMatrices): \"\"\" Initialise the network",
"size {} and learning rate {} for {} epochs...\".format(self.layerSizes, batchSize,",
"rate {} for {} epochs...\".format(self.layerSizes, batchSize, rate, epochs)) for e",
"activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self, testData): \"\"\"Test the network",
"much to change the values each batch. \"\"\" print(\"Training network",
"deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices \"\"\" Take each change for each",
"batch: \"\"\" Get a matrix/vector with the required changes to",
"= newSumBiasVectors newSumWeightMatrices = [] for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices,",
"activations for the entire network, starting with the input layer.\"\"\"",
"activations = inputs for biasVector, weightMatrix in zip(self.biasVectors, self.weightMatrices): \"\"\"",
"\"\"\" self.layerSizes = layerSizes self.biasVectors = biasVectors self.weightMatrices = weightMatrices",
"* totalWeightMatrix) self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices def _backpropagate(self,",
"input layer.\"\"\" for size in layerSizes[1:]: \"\"\" np.random.randn generates arrays",
"the last layer is the output layer. \"\"\" self.layerSizes =",
"activationVector) + biasVector zVectors.append(zVector) activationVector = sigmoid(zVector) activationVectors.append(activationVector) \"\"\" *",
"for size, prevSize in zip(layerSizes[:-1], layerSizes[1:]): weightMatrices.append(np.random.randn(prevSize, size)) return Network(layerSizes,",
"output compared to expected, tune weights and biases based on",
"z-vector for layer in the network\"\"\" zVectors = [] for",
"deltaWeightMatrices = self._backpropagate(inputs, expected) newSumBiasVectors = [] for totalBiasVector, deltaBiasVector",
"with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch #{} completed.\".format(e)) def _tuneNetwork(self,",
"activationVector = sigmoid(zVector) activationVectors.append(activationVector) \"\"\" * Start with output compared",
"vector and the weight matrix. Then get dot product between",
"This is the activation vector for the current layer. \"\"\"",
"weightMatrix in zip(self.biasVectors, self.weightMatrices): \"\"\" For every layer, get the",
"self.biasVectors = newBiasVectors self.weightMatrices = newWeightMatrices def _backpropagate(self, inputs, expected):",
"changes needed from all the training data. \"\"\" deltaBiasVectors, deltaWeightMatrices",
"to the weight/bias. * Then move onto each hidden layer",
"the bias vector. This is the activation vector for the",
"activations = sigmoid(zVector) return activations def train(self, data, epochs, batchSize,",
"newBiasVectors self.weightMatrices = newWeightMatrices def _backpropagate(self, inputs, expected): \"\"\" Return",
"deltaBiasVector deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l in range(-2, -len(self.layerSizes),",
"with stochastic gradient decent. \"\"\" def __init__(self, layerSizes, biasVectors, weightMatrices):",
"Neural Network that use backpropagation with stochastic gradient decent. \"\"\"",
"of the network's outputs based on the given inputs, using",
"Then get dot product between the weight matrix and the",
"activations def train(self, data, epochs, batchSize, rate, testData=None): \"\"\" Train",
"deltaBiasVector) * sigmoidDeriv deltaBiasVectors[l] = deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose())",
"lists for biases and weights for the neurons in the",
"data, get the average of these and subtract them from",
"self.biasVectors = biasVectors self.weightMatrices = weightMatrices @staticmethod def generateRandomNetwork(layerSizes): \"\"\"",
"totalWeightMatrix in zip(self.weightMatrices, sumWeightMatrices): newWeightMatrices.append(weightMatrix - (rate/len(batch)) * totalWeightMatrix) self.biasVectors",
"probable to be around 0. \"\"\" biasVectors = [] \"\"\"Generate",
"for totalWeightMatrix, deltaWeightMatrix in zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices",
"zip(self.biasVectors, self.weightMatrices): zVector = np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector",
"product between the weight matrix and the output vector and",
"np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l in range(-2, -len(self.layerSizes), -1): # Equivalent",
"\"\"\" deltaBiasVector = (activationVectors[-1] - expected) * 2 * sigmoidDerivative(zVectors[-1])",
"set of training data, get the average of these and",
"tuple with gradient of the cost function for each bias",
"for each bias and weight, in the format (vector of",
"sumWeightMatrices = [] for weightMatrix in self.weightMatrices: sumWeightMatrices.append(np.zeros(weightMatrix.shape)) for inputs,",
"= np.dot(weightMatrix, activationVector) + biasVector zVectors.append(zVector) activationVector = sigmoid(zVector) activationVectors.append(activationVector)",
"of these and subtract them from the current weights and",
"Initialise a new network with random weights and biases. Input",
"change to make for each vector, for each set of",
"is how much to change the values each batch. \"\"\"",
"size in layerSizes[1:]: \"\"\" np.random.randn generates arrays of arrays of",
"on the given inputs, using feedforward.\"\"\" activations = inputs for",
"\"\"\" Take each change for each set of training data,",
"for {} epochs...\".format(self.layerSizes, batchSize, rate, epochs)) for e in range(epochs):",
"= deltaBiasVector deltaWeightMatrices[-1] = np.dot(deltaBiasVector, activationVectors[-2].transpose()) for l in range(-2,",
"def generateRandomNetwork(layerSizes): \"\"\" Initialise a new network with random weights",
"zip(sumWeightMatrices, deltaWeightMatrices): newSumWeightMatrices.append(totalWeightMatrix + deltaWeightMatrix) sumWeightMatrices = newSumWeightMatrices \"\"\" Take",
"the activation vector for the current layer. \"\"\" zVector =",
"of layer sizes and lists for biases and weights for",
"expected) * 2 * sigmoidDerivative(zVectors[-1]) deltaBiasVectors[-1] = deltaBiasVector deltaWeightMatrices[-1] =",
"the input layer.\"\"\" for size in layerSizes[1:]: \"\"\" np.random.randn generates",
"Equivalent to https://i.imgur.com/8PQQ28r.png, because deltaBiasVector is * 1 instead weightMatrix",
"biasVectors, weightMatrices): \"\"\" Initialise the network with a list of",
"Initialise the network with a list of layer sizes and",
"\"\"\" Model for a feedforward Neural Network that use backpropagation",
"a list of tuples (inputs, expected outputs). The learning rate",
"self._tuneNetwork(batch, rate) if (testData): result = self._evaluate(testData) print(\"Epoch #{} completed",
"* Start with output compared to expected, tune weights and",
"with shape {}, batch size {} and learning rate {}",
"3 arrays with 2 random numbers. \"\"\" biasVectors.append(np.random.randn(size, 1)) \"\"\"Generate",
"deltaBiasVector deltaWeightMatrices[l] = np.dot(deltaBiasVector, activationVectors[l-1].transpose()) return (deltaBiasVectors, deltaWeightMatrices) def _evaluate(self,",
"vector for the current layer. \"\"\" zVector = np.dot(weightMatrix, activations)",
"to change the values each batch. \"\"\" print(\"Training network with",
"outputs). The learning rate is how much to change the",
"completed with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch #{} completed.\".format(e)) def",
"= [] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all activations",
"self._evaluate(testData) print(\"Epoch #{} completed with {:.2f}% correctness.\".format(e+1, 100/len(testData)*result)) else: print(\"Epoch",
"layer, except the input layer.\"\"\" for size in layerSizes[1:]: \"\"\"",
"on the weight matrix and bias vector filled with zeroes.",
"[] for weightMatrix in self.weightMatrices: deltaWeightMatrices.append(np.zeros(weightMatrix.shape)) \"\"\"Store all activations for",
"using stochastic gradient descent. Smaller batches of random samples from",
"backpropagation with stochastic gradient decent. \"\"\" def __init__(self, layerSizes, biasVectors,",
"network's outputs based on the given inputs, using feedforward.\"\"\" activations",
"self._backpropagate(inputs, expected) newSumBiasVectors = [] for totalBiasVector, deltaBiasVector in zip(sumBiasVectors,",
"* totalBiasVector) newWeightMatrices = [] for weightMatrix, totalWeightMatrix in zip(self.weightMatrices,",
"weight changes), for the specified set of training data. \"\"\"",
"and biases. Then use these as the new weights and",
"in zip(self.biasVectors, self.weightMatrices): \"\"\" For every layer, get the bias",
"starting with the input layer.\"\"\" activationVector = inputs activationVectors =",
"weightMatrices.append(np.random.randn(prevSize, size)) return Network(layerSizes, biasVectors, weightMatrices) def getOutputs(self, inputs): \"\"\"Return",
"#{} completed.\".format(e)) def _tuneNetwork(self, batch, rate): \"\"\" Tune the weights",
"gradient decent. \"\"\" def __init__(self, layerSizes, biasVectors, weightMatrices): \"\"\" Initialise"
] |
[
"from datetime import datetime from marquez_airflow import DAG from airflow.operators.postgres_operator",
"dag=dag ) t3 = PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT INTO",
"INTEGER REFERENCES orders(id), placed_on TIMESTAMP NOT NULL, discount_id INTEGER REFERENCES",
") t3 = PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT INTO orders_7_days",
"AS category_id FROM orders AS o INNER JOIN menu_items AS",
"= mi.category_id INNER JOIN menus AS m ON m.id =",
"description='Loads newly placed orders weekly.' ) t1 = PostgresOperator( task_id='if_not_exists',",
"newly placed orders weekly.' ) t1 = PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db',",
"marquez_airflow import DAG from airflow.operators.postgres_operator import PostgresOperator from airflow.utils.dates import",
"DAG from airflow.operators.postgres_operator import PostgresOperator from airflow.utils.dates import days_ago default_args",
"AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS",
"import PostgresOperator from airflow.utils.dates import days_ago default_args = { 'owner':",
"category_id) SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id,",
"airflow.utils.dates import days_ago default_args = { 'owner': 'datascience', 'depends_on_past': False,",
"placed_on TIMESTAMP NOT NULL, discount_id INTEGER REFERENCES discounts(id), menu_id INTEGER",
"placed orders weekly.' ) t1 = PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql='''",
"PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE IF NOT EXISTS orders_7_days",
"INNER JOIN menu_items AS mi ON mi.id = o.menu_item_id INNER",
"AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id FROM",
"AS m ON m.id = c.menu_id WHERE o.placed_on >= NOW()",
"'email': ['<EMAIL>'] } dag = DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args,",
"o.menu_item_id INNER JOIN categories AS c ON c.id = mi.category_id",
"import datetime from marquez_airflow import DAG from airflow.operators.postgres_operator import PostgresOperator",
"orders AS o INNER JOIN menu_items AS mi ON mi.id",
"'7 days' ''', dag=dag ) t1 >> t2 >> t3",
"INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id) SELECT",
"orders weekly.' ) t1 = PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE",
"REFERENCES categories(id) );''', dag=dag ) t2 = PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db',",
"JOIN menus AS m ON m.id = c.menu_id WHERE o.placed_on",
"mi.id AS menu_item_id, c.id AS category_id FROM orders AS o",
"False, 'start_date': days_ago(1), 'email_on_failure': False, 'email_on_retry': False, 'email': ['<EMAIL>'] }",
"= PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT INTO orders_7_days (order_id, placed_on,",
"menus(id), restaurant_id INTEGER REFERENCES restaurants(id), menu_item_id INTEGER REFERENCES menu_items(id), category_id",
"sql='TRUNCATE TABLE orders_7_days;', dag=dag ) t3 = PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db',",
"False, 'email_on_retry': False, 'email': ['<EMAIL>'] } dag = DAG( 'etl_orders_7_days',",
"airflow.operators.postgres_operator import PostgresOperator from airflow.utils.dates import days_ago default_args = {",
"NOT EXISTS orders_7_days ( order_id INTEGER REFERENCES orders(id), placed_on TIMESTAMP",
"restaurant_id, menu_item_id, category_id) SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id",
"discount_id INTEGER REFERENCES discounts(id), menu_id INTEGER REFERENCES menus(id), restaurant_id INTEGER",
"'owner': 'datascience', 'depends_on_past': False, 'start_date': days_ago(1), 'email_on_failure': False, 'email_on_retry': False,",
"= { 'owner': 'datascience', 'depends_on_past': False, 'start_date': days_ago(1), 'email_on_failure': False,",
"menus AS m ON m.id = c.menu_id WHERE o.placed_on >=",
"t3 = PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT INTO orders_7_days (order_id,",
"o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id",
"categories(id) );''', dag=dag ) t2 = PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE",
"= c.menu_id WHERE o.placed_on >= NOW() - interval '7 days'",
"postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;', dag=dag ) t3 = PostgresOperator( task_id='insert',",
"SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id,",
"task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE IF NOT EXISTS orders_7_days (",
"m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id",
"AS o INNER JOIN menu_items AS mi ON mi.id =",
"mi.category_id INNER JOIN menus AS m ON m.id = c.menu_id",
"placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id) SELECT o.id AS order_id,",
"discount_id, menu_id, restaurant_id, menu_item_id, category_id) SELECT o.id AS order_id, o.placed_on,",
"INTEGER REFERENCES menus(id), restaurant_id INTEGER REFERENCES restaurants(id), menu_item_id INTEGER REFERENCES",
"c ON c.id = mi.category_id INNER JOIN menus AS m",
"orders_7_days;', dag=dag ) t3 = PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT",
"'start_date': days_ago(1), 'email_on_failure': False, 'email_on_retry': False, 'email': ['<EMAIL>'] } dag",
"menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id FROM orders",
"NULL, discount_id INTEGER REFERENCES discounts(id), menu_id INTEGER REFERENCES menus(id), restaurant_id",
"FROM orders AS o INNER JOIN menu_items AS mi ON",
"restaurant_id INTEGER REFERENCES restaurants(id), menu_item_id INTEGER REFERENCES menu_items(id), category_id INTEGER",
"JOIN categories AS c ON c.id = mi.category_id INNER JOIN",
"(order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id) SELECT o.id AS",
"restaurants(id), menu_item_id INTEGER REFERENCES menu_items(id), category_id INTEGER REFERENCES categories(id) );''',",
"menu_item_id, category_id) SELECT o.id AS order_id, o.placed_on, o.discount_id, m.id AS",
"REFERENCES restaurants(id), menu_item_id INTEGER REFERENCES menu_items(id), category_id INTEGER REFERENCES categories(id)",
"TABLE IF NOT EXISTS orders_7_days ( order_id INTEGER REFERENCES orders(id),",
"from marquez_airflow import DAG from airflow.operators.postgres_operator import PostgresOperator from airflow.utils.dates",
"order_id INTEGER REFERENCES orders(id), placed_on TIMESTAMP NOT NULL, discount_id INTEGER",
"DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads newly placed orders weekly.'",
"TIMESTAMP NOT NULL, discount_id INTEGER REFERENCES discounts(id), menu_id INTEGER REFERENCES",
"PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT INTO orders_7_days (order_id, placed_on, discount_id,",
"menu_item_id, c.id AS category_id FROM orders AS o INNER JOIN",
"REFERENCES discounts(id), menu_id INTEGER REFERENCES menus(id), restaurant_id INTEGER REFERENCES restaurants(id),",
"ON m.id = c.menu_id WHERE o.placed_on >= NOW() - interval",
"- interval '7 days' ''', dag=dag ) t1 >> t2",
"c.menu_id WHERE o.placed_on >= NOW() - interval '7 days' ''',",
"menu_id, restaurant_id, menu_item_id, category_id) SELECT o.id AS order_id, o.placed_on, o.discount_id,",
"INNER JOIN categories AS c ON c.id = mi.category_id INNER",
"menu_item_id INTEGER REFERENCES menu_items(id), category_id INTEGER REFERENCES categories(id) );''', dag=dag",
"PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;', dag=dag ) t3 =",
"mi ON mi.id = o.menu_item_id INNER JOIN categories AS c",
"mi.id = o.menu_item_id INNER JOIN categories AS c ON c.id",
"AS c ON c.id = mi.category_id INNER JOIN menus AS",
"<reponame>phixMe/marquez from datetime import datetime from marquez_airflow import DAG from",
"} dag = DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads newly",
"REFERENCES menu_items(id), category_id INTEGER REFERENCES categories(id) );''', dag=dag ) t2",
"orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id) SELECT o.id",
"= o.menu_item_id INNER JOIN categories AS c ON c.id =",
"False, 'email': ['<EMAIL>'] } dag = DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False,",
"INTEGER REFERENCES categories(id) );''', dag=dag ) t2 = PostgresOperator( task_id='tuncate',",
"from airflow.operators.postgres_operator import PostgresOperator from airflow.utils.dates import days_ago default_args =",
"t2 = PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;', dag=dag )",
"category_id INTEGER REFERENCES categories(id) );''', dag=dag ) t2 = PostgresOperator(",
"NOW() - interval '7 days' ''', dag=dag ) t1 >>",
"'datascience', 'depends_on_past': False, 'start_date': days_ago(1), 'email_on_failure': False, 'email_on_retry': False, 'email':",
"AS mi ON mi.id = o.menu_item_id INNER JOIN categories AS",
"order_id, o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id,",
"menu_id INTEGER REFERENCES menus(id), restaurant_id INTEGER REFERENCES restaurants(id), menu_item_id INTEGER",
"o.placed_on >= NOW() - interval '7 days' ''', dag=dag )",
"m.restaurant_id, mi.id AS menu_item_id, c.id AS category_id FROM orders AS",
"PostgresOperator from airflow.utils.dates import days_ago default_args = { 'owner': 'datascience',",
"sql=''' INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id,",
"'depends_on_past': False, 'start_date': days_ago(1), 'email_on_failure': False, 'email_on_retry': False, 'email': ['<EMAIL>']",
"interval '7 days' ''', dag=dag ) t1 >> t2 >>",
"c.id AS category_id FROM orders AS o INNER JOIN menu_items",
"WHERE o.placed_on >= NOW() - interval '7 days' ''', dag=dag",
"dag = DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads newly placed",
"import DAG from airflow.operators.postgres_operator import PostgresOperator from airflow.utils.dates import days_ago",
") t1 = PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE IF",
"categories AS c ON c.id = mi.category_id INNER JOIN menus",
"INNER JOIN menus AS m ON m.id = c.menu_id WHERE",
"import days_ago default_args = { 'owner': 'datascience', 'depends_on_past': False, 'start_date':",
"IF NOT EXISTS orders_7_days ( order_id INTEGER REFERENCES orders(id), placed_on",
">= NOW() - interval '7 days' ''', dag=dag ) t1",
"c.id = mi.category_id INNER JOIN menus AS m ON m.id",
"menu_items(id), category_id INTEGER REFERENCES categories(id) );''', dag=dag ) t2 =",
"INTEGER REFERENCES restaurants(id), menu_item_id INTEGER REFERENCES menu_items(id), category_id INTEGER REFERENCES",
"NOT NULL, discount_id INTEGER REFERENCES discounts(id), menu_id INTEGER REFERENCES menus(id),",
"m ON m.id = c.menu_id WHERE o.placed_on >= NOW() -",
"datetime import datetime from marquez_airflow import DAG from airflow.operators.postgres_operator import",
"ON mi.id = o.menu_item_id INNER JOIN categories AS c ON",
"EXISTS orders_7_days ( order_id INTEGER REFERENCES orders(id), placed_on TIMESTAMP NOT",
"REFERENCES menus(id), restaurant_id INTEGER REFERENCES restaurants(id), menu_item_id INTEGER REFERENCES menu_items(id),",
"orders_7_days ( order_id INTEGER REFERENCES orders(id), placed_on TIMESTAMP NOT NULL,",
"dag=dag ) t2 = PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;',",
"discounts(id), menu_id INTEGER REFERENCES menus(id), restaurant_id INTEGER REFERENCES restaurants(id), menu_item_id",
"TABLE orders_7_days;', dag=dag ) t3 = PostgresOperator( task_id='insert', postgres_conn_id='food_delivery_db', sql='''",
"'email_on_retry': False, 'email': ['<EMAIL>'] } dag = DAG( 'etl_orders_7_days', schedule_interval='@hourly',",
"['<EMAIL>'] } dag = DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads",
"ON c.id = mi.category_id INNER JOIN menus AS m ON",
") t2 = PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;', dag=dag",
"o INNER JOIN menu_items AS mi ON mi.id = o.menu_item_id",
"task_id='insert', postgres_conn_id='food_delivery_db', sql=''' INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id,",
"= PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;', dag=dag ) t3",
"postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE IF NOT EXISTS orders_7_days ( order_id",
"= DAG( 'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads newly placed orders",
"t1 = PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE IF NOT",
"orders(id), placed_on TIMESTAMP NOT NULL, discount_id INTEGER REFERENCES discounts(id), menu_id",
"task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE orders_7_days;', dag=dag ) t3 = PostgresOperator(",
"sql=''' CREATE TABLE IF NOT EXISTS orders_7_days ( order_id INTEGER",
"schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads newly placed orders weekly.' ) t1",
"( order_id INTEGER REFERENCES orders(id), placed_on TIMESTAMP NOT NULL, discount_id",
"INTEGER REFERENCES discounts(id), menu_id INTEGER REFERENCES menus(id), restaurant_id INTEGER REFERENCES",
"postgres_conn_id='food_delivery_db', sql=''' INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id,",
"from airflow.utils.dates import days_ago default_args = { 'owner': 'datascience', 'depends_on_past':",
"'etl_orders_7_days', schedule_interval='@hourly', catchup=False, default_args=default_args, description='Loads newly placed orders weekly.' )",
"'email_on_failure': False, 'email_on_retry': False, 'email': ['<EMAIL>'] } dag = DAG(",
"REFERENCES orders(id), placed_on TIMESTAMP NOT NULL, discount_id INTEGER REFERENCES discounts(id),",
"default_args=default_args, description='Loads newly placed orders weekly.' ) t1 = PostgresOperator(",
"{ 'owner': 'datascience', 'depends_on_past': False, 'start_date': days_ago(1), 'email_on_failure': False, 'email_on_retry':",
"days_ago(1), 'email_on_failure': False, 'email_on_retry': False, 'email': ['<EMAIL>'] } dag =",
"catchup=False, default_args=default_args, description='Loads newly placed orders weekly.' ) t1 =",
"m.id = c.menu_id WHERE o.placed_on >= NOW() - interval '7",
"default_args = { 'owner': 'datascience', 'depends_on_past': False, 'start_date': days_ago(1), 'email_on_failure':",
"AS menu_item_id, c.id AS category_id FROM orders AS o INNER",
"= PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE IF NOT EXISTS",
"datetime from marquez_airflow import DAG from airflow.operators.postgres_operator import PostgresOperator from",
"days_ago default_args = { 'owner': 'datascience', 'depends_on_past': False, 'start_date': days_ago(1),",
"menu_items AS mi ON mi.id = o.menu_item_id INNER JOIN categories",
"JOIN menu_items AS mi ON mi.id = o.menu_item_id INNER JOIN",
"INTEGER REFERENCES menu_items(id), category_id INTEGER REFERENCES categories(id) );''', dag=dag )",
"o.placed_on, o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id",
"INSERT INTO orders_7_days (order_id, placed_on, discount_id, menu_id, restaurant_id, menu_item_id, category_id)",
"CREATE TABLE IF NOT EXISTS orders_7_days ( order_id INTEGER REFERENCES",
"category_id FROM orders AS o INNER JOIN menu_items AS mi",
");''', dag=dag ) t2 = PostgresOperator( task_id='tuncate', postgres_conn_id='food_delivery_db', sql='TRUNCATE TABLE",
"o.discount_id, m.id AS menu_id, m.restaurant_id, mi.id AS menu_item_id, c.id AS",
"weekly.' ) t1 = PostgresOperator( task_id='if_not_exists', postgres_conn_id='food_delivery_db', sql=''' CREATE TABLE"
] |
[
"pizza = { 'crust': 'thick', 'toppings': ['mushrooms', 'extra vegan cheese']",
"print(\"You ordered a \" + pizza['crust'] + \"-crust pizza\" +",
"# store information about a pizza being ordered pizza =",
"pizza['crust'] + \"-crust pizza\" + \"with the following toppings:\") for",
"'crust': 'thick', 'toppings': ['mushrooms', 'extra vegan cheese'] } # summarize",
"{ 'crust': 'thick', 'toppings': ['mushrooms', 'extra vegan cheese'] } #",
"<filename>sample/pizza.py # store information about a pizza being ordered pizza",
"'extra vegan cheese'] } # summarize the order print(\"You ordered",
"\"with the following toppings:\") for topping in pizza['toppings']: print(\"\\t\" +",
"# summarize the order print(\"You ordered a \" + pizza['crust']",
"} # summarize the order print(\"You ordered a \" +",
"ordered a \" + pizza['crust'] + \"-crust pizza\" + \"with",
"+ \"with the following toppings:\") for topping in pizza['toppings']: print(\"\\t\"",
"information about a pizza being ordered pizza = { 'crust':",
"order print(\"You ordered a \" + pizza['crust'] + \"-crust pizza\"",
"pizza\" + \"with the following toppings:\") for topping in pizza['toppings']:",
"vegan cheese'] } # summarize the order print(\"You ordered a",
"+ pizza['crust'] + \"-crust pizza\" + \"with the following toppings:\")",
"\"-crust pizza\" + \"with the following toppings:\") for topping in",
"being ordered pizza = { 'crust': 'thick', 'toppings': ['mushrooms', 'extra",
"+ \"-crust pizza\" + \"with the following toppings:\") for topping",
"['mushrooms', 'extra vegan cheese'] } # summarize the order print(\"You",
"pizza being ordered pizza = { 'crust': 'thick', 'toppings': ['mushrooms',",
"\" + pizza['crust'] + \"-crust pizza\" + \"with the following",
"'toppings': ['mushrooms', 'extra vegan cheese'] } # summarize the order",
"a \" + pizza['crust'] + \"-crust pizza\" + \"with the",
"'thick', 'toppings': ['mushrooms', 'extra vegan cheese'] } # summarize the",
"a pizza being ordered pizza = { 'crust': 'thick', 'toppings':",
"the following toppings:\") for topping in pizza['toppings']: print(\"\\t\" + topping)",
"store information about a pizza being ordered pizza = {",
"about a pizza being ordered pizza = { 'crust': 'thick',",
"ordered pizza = { 'crust': 'thick', 'toppings': ['mushrooms', 'extra vegan",
"= { 'crust': 'thick', 'toppings': ['mushrooms', 'extra vegan cheese'] }",
"cheese'] } # summarize the order print(\"You ordered a \"",
"summarize the order print(\"You ordered a \" + pizza['crust'] +",
"the order print(\"You ordered a \" + pizza['crust'] + \"-crust"
] |
[
"a = float(input('Qual é o preço do produto? R$')) d",
"produto que custava R${:.2f}, na promoção de 23% de desconto",
"a - (a * 23 / 100) print('O produto que",
"= float(input('Qual é o preço do produto? R$')) d =",
"print('O produto que custava R${:.2f}, na promoção de 23% de",
"R$')) d = a - (a * 23 / 100)",
"- (a * 23 / 100) print('O produto que custava",
"do produto? R$')) d = a - (a * 23",
"o preço do produto? R$')) d = a - (a",
"custava R${:.2f}, na promoção de 23% de desconto vai custar:",
"<filename>YouTube/CursoEmVideo/python/ex012.py<gh_stars>0 a = float(input('Qual é o preço do produto? R$'))",
"100) print('O produto que custava R${:.2f}, na promoção de 23%",
"produto? R$')) d = a - (a * 23 /",
"R${:.2f}, na promoção de 23% de desconto vai custar: R${:.2f}'",
"/ 100) print('O produto que custava R${:.2f}, na promoção de",
"(a * 23 / 100) print('O produto que custava R${:.2f},",
"23 / 100) print('O produto que custava R${:.2f}, na promoção",
"d = a - (a * 23 / 100) print('O",
"float(input('Qual é o preço do produto? R$')) d = a",
"que custava R${:.2f}, na promoção de 23% de desconto vai",
"na promoção de 23% de desconto vai custar: R${:.2f}' .format(a,",
"* 23 / 100) print('O produto que custava R${:.2f}, na",
"preço do produto? R$')) d = a - (a *",
"promoção de 23% de desconto vai custar: R${:.2f}' .format(a, d))",
"é o preço do produto? R$')) d = a -",
"= a - (a * 23 / 100) print('O produto"
] |
[
"_populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None: # Copy all necessary",
"_user_name_override _user_name_override = name def get_user_name(): # Get the current",
"if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile(\"^\\\\d+\") # match one or",
"= False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores =",
"or run_dir, the base directory is the current # working",
"that resides under the current submit run_dir # Args: #",
"current submit run_dir # Args: # *paths: Path components to",
"= 1<<30 # 1 GB self.data_reader_process_count = 0 # single",
"submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"] _populate_run_dir(submit_config, host_run_dir) return farm.submit(submit_config,",
"_user_name_override = None class SubmitConfig(util.EasyDict): # Strongly typed config dict",
"= False self.local = internal.local.TargetOptions() self.datasets = [] # (automatically",
"on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert",
"Automatically populated values during submit. Used by various dnnlib libraries",
"[(f[0], os.path.join(run_dir, \"src\", f[1])) for f in files] files +=",
"get_path_from_template # run_desc: Description of the run. Will be used",
"necessary files into the run dir. Assumes that the dir",
"dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() # If we hit an",
"of the string run_id = 0 for dir_name in dir_names:",
"return either Windows or Linux formatted path # automatically select",
"start of the string run_id = 0 for dir_name in",
"we extend the support for automatic training resumption, # and",
"= get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root,",
"= util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush = True) else:",
"run (set these) self.run_dir_root = \"\" # should always be",
"a normal path back to its template representation path =",
"\".\" in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path",
"(abs_path, rel_path) tuples of file paths. rel_path root will #",
"RuntimeError(\"The run dir already exists! ({0})\".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir)",
"# submit_target: Submit target enum value. Used to select where",
"True to load the prior submit_config file from the directory",
"copy source files from the working directory to the #",
"if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"]",
"= False try: print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time",
"= os.path.join(submit_config.run_dir, \"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) #",
"-> str: # Convert a normal path back to its",
"then # override the automatic value # task_name: Automatically populated",
"a run dir, gather files related to the run, copy",
"the working directory to the # run dir. # run_id:",
"normal path to template and the convert it back to",
"the given path template and return either Windows or Linux",
"-> str: # Convert a normal path to template and",
"import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\" else: raise RuntimeError(\"Unknown",
"Attributes: # run_dir_root: Path to the run dir root. Can",
"into the run dir. Assumes that the dir exists, is",
"value during submit # run_name: Automatically populated value during submit",
"by populating the run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume,",
"SubmitTarget(Enum): # The target where the function should be run",
"platform\") def make_run_dir_path(*paths): # Make a path/filename that resides under",
"# return correctly formatted path if path_type == PathType.WINDOWS: return",
"populated value during submit # run_func_kwargs: Automatically populated value during",
"run_func_name: str, create_newdir: bool = False, resume: bool = False,",
"existing run directory # load_config: in case resume = True,",
"dir and task name # run_dir_ignore: List of file patterns",
"open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\") return open(file_or_url,",
"def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\") return",
"run # LOCAL: Run it locally LOCAL = 1 class",
"run_id = max(run_id, i + 1) return run_id def _populate_run_dir(submit_config:",
"should always be passed through get_path_from_template self.run_desc = \"\" self.run_dir_ignore",
"\"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None: #",
"be accepted by the following regex: \" + docker_valid_name_regex +",
"Probable reason: unacceptable characters in your submit_config.run_desc. Task name must",
"= \"\" # should always be passed through get_path_from_template self.run_desc",
"original StyleGAN implementation, we extend the support for automatic training",
"4, width = 200, compact = False) if (submit_config.submit_target ==",
"to happen after we close the logs and create a",
"values during submit. Used by various dnnlib libraries # such",
"os.path.join # Returns: # A file/dirname rooted at submit_config.run_dir. If",
"= True) import dnnlib dnnlib.submit_config = submit_config exit_with_errcode = False",
"running OS if path_type == PathType.AUTO: if platform.system() == \"Windows\":",
"normal path back to its template representation path = path.replace(\"\\\\\",",
"command-line arguments. if load_config: config_file = os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file):",
"worker processes to spawn (zero for single # thread operation)",
"# Make a path/filename that resides under the current submit",
"files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path",
"submit_config.run_desc. Task name must be accepted by the following regex:",
"\"log.txt\"), file_mode=\"a\", should_flush = True) else: # when running in",
"non-zero value\") if submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name",
"dir # submit_target: Submit target enum value. Used to select",
"# such as the DataReader class def __init__(self): super().__init__() #",
"+ 1) return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) ->",
"path type path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type) return",
"import sys import time import traceback from enum import Enum",
"submit. Can be set by the user which will then",
"directory inside the run dir # submit_target: Submit target enum",
"in your submit_config.run_desc. Task name must be accepted by the",
"[\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files",
"f, indent = 4, width = 200, compact = False)",
"pprint import re import shutil import sys import time import",
"class PlatformExtras: # A mixed bag of values used by",
"raise RuntimeError(\"submit_config.num_gpus must be set to a non-zero value\") if",
"import inspect import os import pathlib import pickle import platform",
"\"Windows\": path_type = PathType.WINDOWS elif platform.system() == \"Linux\": path_type =",
"in case resume = True, load prior experiment config instead",
"= PathType.WINDOWS elif platform.system() == \"Linux\": path_type = PathType.LINUX else:",
"to template and the convert it back to a normal",
"run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in submit_config.run_func_name for _idx in",
"== SubmitTarget.LOCAL: farm = internal.local.Target() assert farm is not None",
"in a computing cluster. # Compared to original StyleGAN implementation,",
"self.num_gpus = 1 self.print_info = False self.nvprof = False self.local",
"files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def",
"None: return _user_name_override elif platform.system() == \"Windows\": return os.getlogin() elif",
"submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm = None if",
"unknown target # Disallow submitting jobs with zero num_gpus if",
"run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}() in",
"[] # (automatically populated) self.run_id = None self.run_name = None",
"rel_path root will # be the src directory inside the",
"the prior submit_config file from the directory # (so to",
"old_submit_config = submit_config submit_config = load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"]",
"import pprint import re import shutil import sys import time",
"directory is the current # working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir,",
"assert farm is not None # unknown target # Disallow",
"path/filename that resides under the current submit run_dir # Args:",
"submit_config exit_with_errcode = False try: print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name,",
"else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, \"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name))",
"Determines in which format should a path be formatted #",
"None self.task_name = None self.host_name = \"localhost\" self.platform_extras = PlatformExtras()",
"= 1 self.print_info = False self.nvprof = False self.local =",
"None, should_flush = True) import dnnlib dnnlib.submit_config = submit_config exit_with_errcode",
"bool, create_new: str) -> str: # Create a new run",
"submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)",
"run directory # load_config: in case resume = True, load",
"zero num_gpus if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):",
"formatted path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type",
"Returns: # A file/dirname rooted at submit_config.run_dir. If there's no",
"self.user_name = None self.task_name = None self.host_name = \"localhost\" self.platform_extras",
"of the directory names dir_names = [d for d in",
"os.path.join(run_dir_root, submit_config.run_name) if not resume: if os.path.exists(run_dir) and create_new: raise",
"Assumes IDs are numbers at the start of the directory",
"PathType.LINUX else: raise RuntimeError(\"Unknown platform\") path_template = path_template.replace(\"<USERNAME>\", get_user_name()) #",
"the convert it back to a normal path with given",
"traceback from enum import Enum from .. import util from",
"platform.system() == \"Linux\": path_type = PathType.LINUX else: raise RuntimeError(\"Unknown platform\")",
"None class SubmitConfig(util.EasyDict): # Strongly typed config dict needed to",
"writing is handled by run.sh) logger = util.Logger(file_name = None,",
"\"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files = []",
"error, get out of the script now and signal the",
"Assumes that the dir exists, is local, and is writable",
"directory to the # run dir. # run_id: Automatically populated",
"pathlib import pickle import platform import pprint import re import",
"\"rb\") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding",
"base directory is the current # working directory. # E.g.,",
"run_name: Automatically populated value during submit # run_dir: Automatically populated",
"of the experiment rather than the newly provided # command-line",
"path_type: PathType = PathType.AUTO) -> str: # Replace tags in",
"to the run dir # run_dir_extra_files: List of (abs_path, rel_path)",
"new run directory # resume: resumes a prior experiment using",
"current command-line parameters submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm",
"= get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path def set_user_name_override(name:",
"int(m.group()) run_id = max(run_id, i + 1) return run_id def",
"util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(\".\") -",
"enum import Enum from .. import util from ..util import",
"template representation path = path.replace(\"\\\\\", \"/\") return path def convert_path(path:",
"at the start of the string run_id = 0 for",
"time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if",
"reason: unacceptable characters in your submit_config.run_desc. Task name must be",
"int: # Reads all directory names in a given directory",
"0 for dir_name in dir_names: m = r.match(dir_name) if m",
"\"submit_config.txt\"), \"w\") as f: pprint.pprint(submit_config, stream = f, indent =",
"3 class PlatformExtras: # A mixed bag of values used",
"create_newdir: enforces the creation of a new run directory #",
"util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None: # Wrap the actual",
"\"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None: # Wrap the",
"the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root)",
"of resumption, load_config = True to load the prior submit_config",
"platform_extras: Automatically populated values during submit. Used by various dnnlib",
"the current command-line parameters submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target",
"# to whatever process that started this script. if exit_with_errcode:",
"\"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex, submit_config.task_name):",
"as the DataReader class def __init__(self): super().__init__() # run (set",
"elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown platform\")",
"= None class SubmitConfig(util.EasyDict): # Strongly typed config dict needed",
"name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name",
"or in a computing cluster. # Compared to original StyleGAN",
"_create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)",
"dnnlib.submit_config = submit_config exit_with_errcode = False try: print(\"dnnlib: Running {0}()",
"submit_config file from the directory # (so to maintain the",
"Automatically populated value during submit # run_func_kwargs: Automatically populated value",
"run_dir, the base directory is the current # working directory.",
"submit # run_func_name: Automatically populated value during submit # run_func_kwargs:",
"platform.system() == \"Windows\": return os.getlogin() elif platform.system() == \"Linux\": try:",
"create a _finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close()",
"launched # num_gpus: Number of GPUs used/requested for the run",
"will then # override the automatic value # task_name: Automatically",
"(so to maintain the original configuration of the experiment rather",
"with tags # Needs to always be run through get_path_from_template",
"str) -> str: # Create a new run dir with",
"to load the prior submit_config file from the directory #",
"for automatic training resumption, # and network recompilation. import copy",
"load_config = True to load the prior submit_config file from",
"Get the current user name if _user_name_override is not None:",
"# single threaded default _user_name_override = None class SubmitConfig(util.EasyDict): #",
"Set the global username override value global _user_name_override _user_name_override =",
"== SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path =",
"the script now and signal the error # to whatever",
"not None # unknown target # Disallow submitting jobs with",
"def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str: #",
"the dir exists, is local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir,",
"submit # run_func_kwargs: Automatically populated value during submit # user_name:",
"def set_user_name_override(name: str) -> None: # Set the global username",
"\".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files = [] # submit (set these)",
"# run_func_name: Automatically populated value during submit # run_func_kwargs: Automatically",
"flushing if is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\",",
"self.local = internal.local.TargetOptions() self.datasets = [] # (automatically populated) self.run_id",
"Path to the run dir root. Can be optionally templated",
"given directory (non-recursive) and returns the next (increasing) run id",
"name def get_user_name(): # Get the current user name if",
"util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True) files += submit_config.run_dir_extra_files",
"\"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files =",
"# Convert a normal path to template and the convert",
"buffers # data_reader_process_count: Number of worker processes to spawn (zero",
"self.print_info = False self.nvprof = False self.local = internal.local.TargetOptions() self.datasets",
"Prepare submission by populating the run dir #-------------------------------------------------------------------- host_run_dir =",
"and task name # run_dir_ignore: List of file patterns used",
"set_user_name_override(name: str) -> None: # Set the global username override",
"Needs to always be run through get_path_from_template # run_desc: Description",
"\"\" # should always be passed through get_path_from_template self.run_desc =",
"sig = inspect.signature(run_func_obj) if \"submit_config\" in sig.parameters: run_func_obj(submit_config = submit_config,",
"the src directory inside the run dir # submit_target: Submit",
"when submitting # local.do_not_copy_source_files: Do not copy source files from",
"run id # Assumes IDs are numbers at the start",
"# Replace tags in the given path template and return",
"# run_desc: Description of the run. Will be used in",
"= [] # (automatically populated) self.run_id = None self.run_name =",
"# match one or more digits at the start of",
"str, path_type: PathType = PathType.AUTO) -> str: # Convert a",
"def __init__(self): super().__init__() # run (set these) self.run_dir_root = \"\"",
"pwd return pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\" else: raise RuntimeError(\"Unknown platform\")",
"force flushing (log writing is handled by run.sh) logger =",
"open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() # If",
"os.path.join(run_dir, \"src\", f[1])) for f in files] files += [(os.path.join(dnnlib_module_dir_path,",
"information when submitting # local.do_not_copy_source_files: Do not copy source files",
"({0})\".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str)",
"using its existing run directory # load_config: in case resume",
"config instead of using the current command-line parameters submit_config =",
"None) or (submit_config.num_gpus == 0): raise RuntimeError(\"submit_config.num_gpus must be set",
"*paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new:",
"# task_name: Automatically populated value during submit # host_name: Automatically",
"# data_reader_process_count: Number of worker processes to spawn (zero for",
"call for handling logging, exceptions, typing, etc is_local = submit_config.submit_target",
"False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore,",
"farm = internal.local.Target() assert farm is not None # unknown",
"current # working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib",
"\", got \" + submit_config.task_name) # Farm specific preparations for",
"is None) or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths) return",
"# Attributes: # run_dir_root: Path to the run dir root.",
"load the prior submit_config file from the directory # (so",
"= None self.host_name = \"localhost\" self.platform_extras = PlatformExtras() def get_path_from_template(path_template:",
"# Disallow submitting jobs with zero num_gpus if (submit_config.num_gpus is",
"add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores",
"increasing ID number at the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)",
"List of (abs_path, rel_path) tuples of file paths. rel_path root",
"files into the run dir. Assumes that the dir exists,",
"str) -> None: # Set the global username override value",
"d))] r = re.compile(\"^\\\\d+\") # match one or more digits",
"resumes a prior experiment using its existing run directory #",
"representation path = path.replace(\"\\\\\", \"/\") return path def convert_path(path: str,",
"**submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() -",
"select either WINDOWS or LINUX WINDOWS = 1 LINUX =",
"sys import time import traceback from enum import Enum from",
"must be accepted by the following regex: \" + docker_valid_name_regex",
"time import traceback from enum import Enum from .. import",
"prior experiment config instead of using the current command-line parameters",
"LINUX: Format with Linux/Posix style # AUTO: Use current OS",
"populated value during submit # run_name: Automatically populated value during",
"the support for automatic training resumption, # and network recompilation.",
"the base directory is the current # working directory. #",
"resume: resumes a prior experiment using its existing run directory",
"# user_name: Automatically populated value during submit. Can be set",
"PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if",
"= util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative =",
"run dir root. Can be optionally templated with tags #",
"in files] files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))]",
"template and the convert it back to a normal path",
"def get_template_from_path(path: str) -> str: # Convert a normal path",
"[] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in submit_config.run_func_name for _idx",
"populated values during submit. Used by various dnnlib libraries #",
"submit # platform_extras: Automatically populated values during submit. Used by",
"arguments. if load_config: config_file = os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file): old_submit_config",
"platform\") path_template = path_template.replace(\"<USERNAME>\", get_user_name()) # return correctly formatted path",
"= 1 class PathType(Enum): # Determines in which format should",
"to always be run through get_path_from_template # run_desc: Description of",
"util from ..util import EasyDict from . import internal class",
"open(file_or_url, \"rb\") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file,",
"the logs and create a _finished.txt exit_with_errcode = True finally:",
"is not None: i = int(m.group()) run_id = max(run_id, i",
"target # Disallow submitting jobs with zero num_gpus if (submit_config.num_gpus",
"dnnlib if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None): return",
"submit_config submit_config = load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"]",
"get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str)",
"file paths. rel_path root will # be the src directory",
"WINDOWS: Format with Windows style # LINUX: Format with Linux/Posix",
"path = get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str) ->",
"if not resume: if os.path.exists(run_dir) and create_new: raise RuntimeError(\"The run",
"\"src\", f[1])) for f in files] files += [(os.path.join(dnnlib_module_dir_path, \"submission\",",
"PathType(Enum): # Determines in which format should a path be",
"0): raise RuntimeError(\"submit_config.num_gpus must be set to a non-zero value\")",
"through get_path_from_template self.run_desc = \"\" self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\",",
"# E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib if (dnnlib.submit_config is None)",
"run dir, and launch the run in appropriate place. #",
"OS if path_type == PathType.AUTO: if platform.system() == \"Windows\": path_type",
"num_gpus if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0): raise",
"start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig =",
"or LINUX WINDOWS = 1 LINUX = 2 AUTO =",
"and is writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"),",
"If we hit an error, get out of the script",
"ignores = submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files",
"# (so to maintain the original configuration of the experiment",
"Do not copy source files from the working directory to",
"# Reads all directory names in a given directory (non-recursive)",
"\"Linux\": path_type = PathType.LINUX else: raise RuntimeError(\"Unknown platform\") path_template =",
"a non-zero value\") if submit_config.user_name is None: submit_config.user_name = get_user_name()",
"prior experiment using its existing run directory # load_config: in",
"GB self.data_reader_process_count = 0 # single threaded default _user_name_override =",
"cluster, redirect stderr to stdout, and just force flushing (log",
"# Copy all necessary files into the run dir. Assumes",
"value during submit. Can be set by the user which",
"run # print_info: Whether to print debug information when submitting",
"-> None: # Set the global username override value global",
"- 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores =",
"= [(f[0], os.path.join(run_dir, \"src\", f[1])) for f in files] files",
"# working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib if",
"target where the function should be run # LOCAL: Run",
"run dir. Assumes that the dir exists, is local, and",
"== PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown platform\") def get_template_from_path(path:",
"current user name if _user_name_override is not None: return _user_name_override",
"a normal path to template and the convert it back",
"tags in the given path template and return either Windows",
"return \"unknown\" else: raise RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths): # Make",
"# LINUX: Format with Linux/Posix style # AUTO: Use current",
"configuration of the experiment rather than the newly provided #",
"LINUX = 2 AUTO = 3 class PlatformExtras: # A",
"# Strongly typed config dict needed to submit runs #",
"= True) files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, \"src\",",
"= \"latin1\") def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool =",
"digits at the start of the string run_id = 0",
"old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] =",
"force flushing if is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"),",
"be run either locally or in a computing cluster. #",
"error # to whatever process that started this script. if",
"return path def set_user_name_override(name: str) -> None: # Set the",
"util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush = True) else: #",
"= get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str) -> None:",
"local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir,",
"submitting jobs with zero num_gpus if (submit_config.num_gpus is None) or",
"host_run_dir) # In case of resumption, load_config = True to",
"# num_gpus: Number of GPUs used/requested for the run #",
"= get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- #",
"# run_dir_root: Path to the run dir root. Can be",
"# Create a new run dir with increasing ID number",
"f in files] files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir,",
"traceback.print_exc() log_src = os.path.join(submit_config.run_dir, \"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src,",
"more digits at the start of the string run_id =",
"\"Windows\": return os.getlogin() elif platform.system() == \"Linux\": try: import pwd",
"assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if \"submit_config\" in sig.parameters: run_func_obj(submit_config",
"username override value global _user_name_override _user_name_override = name def get_user_name():",
"os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file): old_submit_config = submit_config submit_config = load_pkl(config_file)",
"a submit farm.finalize_submit_config(submit_config, host_run_dir) # In case of resumption, load_config",
"for _idx in range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files",
"a normal path with given path type path_template = get_template_from_path(path)",
"#-------------------------------------------------------------------- # Prepare submission by populating the run dir #--------------------------------------------------------------------",
"Automatically populated value during submit # platform_extras: Automatically populated values",
"path be formatted # WINDOWS: Format with Windows style #",
"Linux/Posix style # AUTO: Use current OS type to select",
"a file, and force flushing if is_local: logger = util.Logger(file_name",
"debug information when submitting # local.do_not_copy_source_files: Do not copy source",
"dnnlib.submit_config = None logger.close() # If we hit an error,",
"str) -> None: # Copy all necessary files into the",
"= True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None",
"str: # Convert a normal path to template and the",
"pickle import platform import pprint import re import shutil import",
"Wrap the actual run function call for handling logging, exceptions,",
"run_func_name: Automatically populated value during submit # run_func_kwargs: Automatically populated",
"class PathType(Enum): # Determines in which format should a path",
"path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path def",
"= SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False self.nvprof =",
"= util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if \"submit_config\" in",
"gather files related to the run, copy files to the",
"and just force flushing (log writing is handled by run.sh)",
"os.path.join(submit_config.run_dir, \"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer",
"logging, exceptions, typing, etc is_local = submit_config.submit_target == SubmitTarget.LOCAL #",
"from ..util import EasyDict from . import internal class SubmitTarget(Enum):",
"**run_func_kwargs) -> None: # Create a run dir, gather files",
"# command-line arguments. if load_config: config_file = os.path.join(host_run_dir, \"submit_config.pkl\") if",
"if exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return",
"select where the run is actually launched # num_gpus: Number",
"platform.system() == \"Windows\": path_type = PathType.WINDOWS elif platform.system() == \"Linux\":",
"a cluster, redirect stderr to stdout, and just force flushing",
"PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:",
"False, **run_func_kwargs) -> None: # Create a run dir, gather",
"when running locally, redirect stderr to stdout, log stdout to",
"util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\")",
"exceptions, typing, etc is_local = submit_config.submit_target == SubmitTarget.LOCAL # when",
"\"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() # If we hit",
"(increasing) run id # Assumes IDs are numbers at the",
"self.host_name = \"localhost\" self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str, path_type:",
"preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir) # In case of",
"\".cache\", \".idea\", \".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files = [] # submit",
"in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"] _populate_run_dir(submit_config, host_run_dir)",
"from enum import Enum from .. import util from ..util",
"patterns used to ignore files when copying files to the",
"path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown platform\") def",
"dir with increasing ID number at the start run_dir_root =",
"run directory # resume: resumes a prior experiment using its",
"= None self.run_func_name = None self.run_func_kwargs = None self.user_name =",
"= old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"]",
"if path_type == PathType.AUTO: if platform.system() == \"Windows\": path_type =",
"resume: bool, create_new: str) -> str: # Create a new",
"run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative",
"path_template = path_template.replace(\"<USERNAME>\", get_user_name()) # return correctly formatted path if",
"when running in a cluster, redirect stderr to stdout, and",
"# Wrap the actual run function call for handling logging,",
"= create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\"",
"run.sh) logger = util.Logger(file_name = None, should_flush = True) import",
"threaded default _user_name_override = None class SubmitConfig(util.EasyDict): # Strongly typed",
"not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid task name. Probable reason: unacceptable",
"match one or more digits at the start of the",
"sys.exit(1) return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir",
"submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path,",
"# run_dir_ignore: List of file patterns used to ignore files",
"LINUX WINDOWS = 1 LINUX = 2 AUTO = 3",
"when copying files to the run dir # run_dir_extra_files: List",
"get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: # Replace",
"*paths: Path components to be passed to os.path.join # Returns:",
"if m is not None: i = int(m.group()) run_id =",
"Used by various dnnlib libraries # such as the DataReader",
"and the convert it back to a normal path with",
"None logger.close() # If we hit an error, get out",
"with Windows style # LINUX: Format with Linux/Posix style #",
"add_base_to_relative = True) files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir,",
"# host_name: Automatically populated value during submit # platform_extras: Automatically",
"= None self.run_func_kwargs = None self.user_name = None self.task_name =",
"if os.path.exists(config_file): old_submit_config = submit_config submit_config = load_pkl(config_file) submit_config[\"run_id\"] =",
"self.run_name = None self.run_dir = None self.run_func_name = None self.run_func_kwargs",
"= path_template.replace(\"<USERNAME>\", get_user_name()) # return correctly formatted path if path_type",
"return correctly formatted path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template))",
"== \"Linux\": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\"",
"\"unknown\" else: raise RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths): # Make a",
"signal the error # to whatever process that started this",
"the run dir # run_dir_extra_files: List of (abs_path, rel_path) tuples",
"if _user_name_override is not None: return _user_name_override elif platform.system() ==",
"None: # Copy all necessary files into the run dir.",
"print_info: Whether to print debug information when submitting # local.do_not_copy_source_files:",
"unacceptable characters in your submit_config.run_desc. Task name must be accepted",
"in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished",
"{1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local: raise else: traceback.print_exc()",
"submit_config or run_dir, the base directory is the current #",
"os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) ->",
"at the start of the directory names dir_names = [d",
"d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile(\"^\\\\d+\") #",
"type path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type) return path",
"given path type path_template = get_template_from_path(path) path = get_path_from_template(path_template, path_type)",
"= os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer sys.exit(1) to happen",
"submit_config.run_dir. If there's no # submit_config or run_dir, the base",
"docker_valid_name_regex + \", got \" + submit_config.task_name) # Farm specific",
"dir_name in dir_names: m = r.match(dir_name) if m is not",
"the run dir, and launch the run in appropriate place.",
"Can be set by the user which will then #",
"to maintain the original configuration of the experiment rather than",
"related to the run, copy files to the run dir,",
"# Compared to original StyleGAN implementation, we extend the support",
"from . import internal class SubmitTarget(Enum): # The target where",
"os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if not resume: if",
"False) if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files =",
"submit_target: Submit target enum value. Used to select where the",
"_user_name_override elif platform.system() == \"Windows\": return os.getlogin() elif platform.system() ==",
"m is not None: i = int(m.group()) run_id = max(run_id,",
"type to select either WINDOWS or LINUX WINDOWS = 1",
"the current submit run_dir # Args: # *paths: Path components",
"happen after we close the logs and create a _finished.txt",
"function call for handling logging, exceptions, typing, etc is_local =",
"Format with Windows style # LINUX: Format with Linux/Posix style",
"ignore files when copying files to the run dir #",
"\"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() # If we",
"during submit # user_name: Automatically populated value during submit. Can",
"all directory names in a given directory (non-recursive) and returns",
"= path.replace(\"\\\\\", \"/\") return path def convert_path(path: str, path_type: PathType",
"logger.close() # If we hit an error, get out of",
"= name def get_user_name(): # Get the current user name",
"None self.host_name = \"localhost\" self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str,",
"exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config =",
"(dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def",
"def __init__(self): self.data_reader_buffer_size = 1<<30 # 1 GB self.data_reader_process_count =",
"number at the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not",
"farm = None if submit_target == SubmitTarget.LOCAL: farm = internal.local.Target()",
"\"output.txt\"))` import dnnlib if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is",
"1 class PathType(Enum): # Determines in which format should a",
"i + 1) return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str)",
"submit_config.submit_target farm = None if submit_target == SubmitTarget.LOCAL: farm =",
"enum value. Used to select where the run is actually",
"RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths): # Make a path/filename that resides",
"# Prepare submission by populating the run dir #-------------------------------------------------------------------- host_run_dir",
"# Needs to always be run through get_path_from_template # run_desc:",
"submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, \"src\", f[1])) for f in",
"# *paths: Path components to be passed to os.path.join #",
"= util.Logger(file_name = None, should_flush = True) import dnnlib dnnlib.submit_config",
"handling logging, exceptions, typing, etc is_local = submit_config.submit_target == SubmitTarget.LOCAL",
"..util import EasyDict from . import internal class SubmitTarget(Enum): #",
"to size internal shared memory buffers # data_reader_process_count: Number of",
"dir root. Can be optionally templated with tags # Needs",
"run dir with increasing ID number at the start run_dir_root",
"# when running in a cluster, redirect stderr to stdout,",
"SubmitConfig, run_dir: str) -> None: # Copy all necessary files",
"correctly formatted path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif",
"# run (set these) self.run_dir_root = \"\" # should always",
"# Farm specific preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir) #",
"bag of values used by dnnlib heuristics # Attributes: #",
"create_new = create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex =",
"value global _user_name_override _user_name_override = name def get_user_name(): # Get",
"convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: # Convert",
"= None self.run_name = None self.run_dir = None self.run_func_name =",
"util.open_url(file_or_url, cache_dir = \".stylegan2-cache\") return open(file_or_url, \"rb\") def load_pkl(file_or_url): with",
"submit_config.run_name) if not resume: if os.path.exists(run_dir) and create_new: raise RuntimeError(\"The",
"Compared to original StyleGAN implementation, we extend the support for",
"start_time))) except: if is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir,",
"Reads all directory names in a given directory (non-recursive) and",
"bool = False, load_config: bool = False, **run_func_kwargs) -> None:",
"during submit # host_name: Automatically populated value during submit #",
"Submit target enum value. Used to select where the run",
"PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown platform\") def get_template_from_path(path: str)",
"value. Used to select where the run is actually launched",
"is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush =",
"in which format should a path be formatted # WINDOWS:",
"get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name)",
"in the given path template and return either Windows or",
"files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in submit_config.run_func_name",
"experiment using its existing run directory # load_config: in case",
"self.data_reader_process_count = 0 # single threaded default _user_name_override = None",
"= os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative =",
"dir already exists! ({0})\".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir",
"Convert a normal path to template and the convert it",
"raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, \"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root),",
"hit an error, get out of the script now and",
"run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir =",
"# Assumes IDs are numbers at the start of the",
"self.run_dir_root = \"\" # should always be passed through get_path_from_template",
"exit_with_errcode = False try: print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name))",
"the run in appropriate place. # create_newdir: enforces the creation",
"(submit_config.num_gpus is None) or (submit_config.num_gpus == 0): raise RuntimeError(\"submit_config.num_gpus must",
"re.compile(\"^\\\\d+\") # match one or more digits at the start",
"try: print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time()",
"get_template_from_path(path: str) -> str: # Convert a normal path back",
"= submit_config submit_config = load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"] =",
"+ docker_valid_name_regex + \", got \" + submit_config.task_name) # Farm",
"None self.run_func_kwargs = None self.user_name = None self.task_name = None",
"with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f: pprint.pprint(submit_config, stream = f,",
"to the run dir, and launch the run in appropriate",
"# run_name: Automatically populated value during submit # run_dir: Automatically",
"logger = util.Logger(file_name = None, should_flush = True) import dnnlib",
"names dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root,",
"to be run either locally or in a computing cluster.",
"close the logs and create a _finished.txt exit_with_errcode = True",
"be passed to os.path.join # Returns: # A file/dirname rooted",
"to print debug information when submitting # local.do_not_copy_source_files: Do not",
"to stdout, and just force flushing (log writing is handled",
"False self.nvprof = False self.local = internal.local.TargetOptions() self.datasets = []",
"case resume = True, load prior experiment config instead of",
"compact = False) if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return",
"WINDOWS or LINUX WINDOWS = 1 LINUX = 2 AUTO",
"= 0 for dir_name in dir_names: m = r.match(dir_name) if",
"+= util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True) files +=",
"farm is not None # unknown target # Disallow submitting",
"RuntimeError(\"Unknown platform\") def get_template_from_path(path: str) -> str: # Convert a",
"if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if not",
"In case of resumption, load_config = True to load the",
"appropriate place. # create_newdir: enforces the creation of a new",
"# Attributes: # data_reader_buffer_size: Used by DataReader to size internal",
"string run_id = 0 for dir_name in dir_names: m =",
"else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))",
"tuples of file paths. rel_path root will # be the",
"run_wrapper(submit_config: SubmitConfig) -> None: # Wrap the actual run function",
"load_config: in case resume = True, load prior experiment config",
"def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str: #",
"value during submit # platform_extras: Automatically populated values during submit.",
"if is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, \"log.txt\") log_dst",
"log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer sys.exit(1) to",
"= \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex,",
"# LOCAL: Run it locally LOCAL = 1 class PathType(Enum):",
"submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"]",
"Linux formatted path # automatically select path type depending on",
"elif platform.system() == \"Linux\": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except:",
"implementation, we extend the support for automatic training resumption, #",
"submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\"",
"os.path.exists(config_file): old_submit_config = submit_config submit_config = load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"]",
"Replace tags in the given path template and return either",
"Automatically populated value during submit # user_name: Automatically populated value",
"out of the script now and signal the error #",
"EasyDict from . import internal class SubmitTarget(Enum): # The target",
"run_dir: str) -> None: # Copy all necessary files into",
"just force flushing (log writing is handled by run.sh) logger",
"not None: i = int(m.group()) run_id = max(run_id, i +",
"to the run dir root. Can be optionally templated with",
"dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name",
"sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}()",
"a path/filename that resides under the current submit run_dir #",
"num_gpus: Number of GPUs used/requested for the run # print_info:",
"return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise",
"None self.run_dir = None self.run_func_name = None self.run_func_kwargs = None",
"submit_config = load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if",
"actual run function call for handling logging, exceptions, typing, etc",
"the actual run function call for handling logging, exceptions, typing,",
"type depending on running OS if path_type == PathType.AUTO: if",
"pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f:",
"\"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f: pprint.pprint(submit_config, stream =",
"else: raise RuntimeError(\"Unknown platform\") path_template = path_template.replace(\"<USERNAME>\", get_user_name()) # return",
"the current # working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import",
"Number of worker processes to spawn (zero for single #",
"create_new: raise RuntimeError(\"The run dir already exists! ({0})\".format(run_dir)) if not",
"platform\") def get_template_from_path(path: str) -> str: # Convert a normal",
"single # thread operation) def __init__(self): self.data_reader_buffer_size = 1<<30 #",
"submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False, resume: bool",
"be run through get_path_from_template # run_desc: Description of the run.",
"# Get the current user name if _user_name_override is not",
"for f in files] files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"),",
"as file: return pickle.load(file, encoding = \"latin1\") def submit_run(submit_config: SubmitConfig,",
"= True) else: # when running in a cluster, redirect",
"= PathType.LINUX else: raise RuntimeError(\"Unknown platform\") path_template = path_template.replace(\"<USERNAME>\", get_user_name())",
"task_name: Automatically populated value during submit # host_name: Automatically populated",
"= [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\", \".vscode\", \"_cudacache\"]",
"farm.finalize_submit_config(submit_config, host_run_dir) # In case of resumption, load_config = True",
"files related to the run, copy files to the run",
"SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)",
"None # unknown target # Disallow submitting jobs with zero",
"numbers at the start of the directory names dir_names =",
"dict needed to submit runs # Attributes: # run_dir_root: Path",
"get_path_from_template(path_template, path_type) return path def set_user_name_override(name: str) -> None: #",
"= PathType.AUTO) -> str: # Replace tags in the given",
"f: pprint.pprint(submit_config, stream = f, indent = 4, width =",
"(submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path",
"following regex: \" + docker_valid_name_regex + \", got \" +",
"run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir)",
"locally or in a computing cluster. # Compared to original",
"the original configuration of the experiment rather than the newly",
"formatted path # automatically select path type depending on running",
"running locally, redirect stderr to stdout, log stdout to a",
"# Args: # *paths: Path components to be passed to",
"PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else:",
"no # submit_config or run_dir, the base directory is the",
"run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path",
"SubmitConfig) -> None: # Wrap the actual run function call",
"1) return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:",
"size internal shared memory buffers # data_reader_process_count: Number of worker",
"user_name: Automatically populated value during submit. Can be set by",
"import os import pathlib import pickle import platform import pprint",
"used/requested for the run # print_info: Whether to print debug",
"case of resumption, load_config = True to load the prior",
"(non-recursive) and returns the next (increasing) run id # Assumes",
"return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\") return open(file_or_url, \"rb\") def load_pkl(file_or_url):",
"training resumption, # and network recompilation. import copy import inspect",
"dir. # run_id: Automatically populated value during submit # run_name:",
"file_mode=\"a\", should_flush = True) else: # when running in a",
"+= [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config:",
"its existing run directory # load_config: in case resume =",
"# thread operation) def __init__(self): self.data_reader_buffer_size = 1<<30 # 1",
"internal class SubmitTarget(Enum): # The target where the function should",
"is_local = submit_config.submit_target == SubmitTarget.LOCAL # when running locally, redirect",
"to select where the run is actually launched # num_gpus:",
"class SubmitConfig(util.EasyDict): # Strongly typed config dict needed to submit",
"templated with tags # Needs to always be run through",
"if submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name",
"import re import shutil import sys import time import traceback",
"f[1])) for f in files] files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\",",
"print debug information when submitting # local.do_not_copy_source_files: Do not copy",
"= \"localhost\" self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType",
"m = r.match(dir_name) if m is not None: i =",
"str: # Replace tags in the given path template and",
"\"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer sys.exit(1) to happen after we",
"LOCAL: Run it locally LOCAL = 1 class PathType(Enum): #",
"of (abs_path, rel_path) tuples of file paths. rel_path root will",
"Used to select where the run is actually launched #",
"be passed through get_path_from_template self.run_desc = \"\" self.run_dir_ignore = [\"__pycache__\",",
"the DataReader class def __init__(self): super().__init__() # run (set these)",
"self.run_func_name = None self.run_func_kwargs = None self.user_name = None self.task_name",
"= old_submit_config[\"run_name\"] if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"]",
"{0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name)",
"range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores",
"run dir # submit_target: Submit target enum value. Used to",
"runs # Attributes: # run_dir_root: Path to the run dir",
"select path type depending on running OS if path_type ==",
"{1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj)",
"now and signal the error # to whatever process that",
"is writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\")",
"where the run is actually launched # num_gpus: Number of",
"dir exists, is local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"),",
"submit farm.finalize_submit_config(submit_config, host_run_dir) # In case of resumption, load_config =",
"if submit_target == SubmitTarget.LOCAL: farm = internal.local.Target() assert farm is",
"to whatever process that started this script. if exit_with_errcode: sys.exit(1)",
"run either locally or in a computing cluster. # Compared",
"\"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None:",
"path if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type ==",
"the global username override value global _user_name_override _user_name_override = name",
"submit run_dir # Args: # *paths: Path components to be",
"process that started this script. if exit_with_errcode: sys.exit(1) return submit_config",
"submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs #--------------------------------------------------------------------",
"already exists! ({0})\".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def",
"to the run, copy files to the run dir, and",
"os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) -> None: # Wrap",
"Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj =",
"of the script now and signal the error # to",
"= False self.nvprof = False self.local = internal.local.TargetOptions() self.datasets =",
"= load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if \"resume_pkl\"",
"by DataReader to size internal shared memory buffers # data_reader_process_count:",
"is not None: return _user_name_override elif platform.system() == \"Windows\": return",
"os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if not resume: if os.path.exists(run_dir)",
"_user_name_override is not None: return _user_name_override elif platform.system() == \"Windows\":",
"given path template and return either Windows or Linux formatted",
"os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush = True) else: # when running",
"by run.sh) logger = util.Logger(file_name = None, should_flush = True)",
"be set to a non-zero value\") if submit_config.user_name is None:",
"util.Logger(file_name = None, should_flush = True) import dnnlib dnnlib.submit_config =",
"with zero num_gpus if (submit_config.num_gpus is None) or (submit_config.num_gpus ==",
"return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir =",
"return files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in",
"which will then # override the automatic value # task_name:",
"get_user_name(): # Get the current user name if _user_name_override is",
"is None) or (submit_config.num_gpus == 0): raise RuntimeError(\"submit_config.num_gpus must be",
"= _create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id,",
"path_template.replace(\"<USERNAME>\", get_user_name()) # return correctly formatted path if path_type ==",
"if os.path.exists(run_dir) and create_new: raise RuntimeError(\"The run dir already exists!",
"None: # Wrap the actual run function call for handling",
"= submit_config.submit_target == SubmitTarget.LOCAL # when running locally, redirect stderr",
"= None self.user_name = None self.task_name = None self.host_name =",
"\".vscode\", \"_cudacache\"] self.run_dir_extra_files = [] # submit (set these) self.submit_target",
"the run dir root. Can be optionally templated with tags",
"self.nvprof = False self.local = internal.local.TargetOptions() self.datasets = [] #",
"these) self.run_dir_root = \"\" # should always be passed through",
"run_dir: Automatically populated value during submit # run_func_name: Automatically populated",
"return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str)",
"= None self.run_dir = None self.run_func_name = None self.run_func_kwargs =",
"files from the working directory to the # run dir.",
"Run it locally LOCAL = 1 class PathType(Enum): # Determines",
"override the automatic value # task_name: Automatically populated value during",
"True) files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, \"src\", f[1]))",
"value during submit # user_name: Automatically populated value during submit.",
"copying files to the run dir # run_dir_extra_files: List of",
"src directory inside the run dir # submit_target: Submit target",
"back to its template representation path = path.replace(\"\\\\\", \"/\") return",
"stream = f, indent = 4, width = 200, compact",
"populated value during submit # run_dir: Automatically populated value during",
"# Returns: # A file/dirname rooted at submit_config.run_dir. If there's",
"DataReader to size internal shared memory buffers # data_reader_process_count: Number",
"self.run_dir_extra_files = [] # submit (set these) self.submit_target = SubmitTarget.LOCAL",
"global _user_name_override _user_name_override = name def get_user_name(): # Get the",
"- start_time))) except: if is_local: raise else: traceback.print_exc() log_src =",
"logger = util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush = True)",
"# submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1",
"and submit_config.local.do_not_copy_source_files: return files = [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert",
"_finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config",
"run through get_path_from_template # run_desc: Description of the run. Will",
"(log writing is handled by run.sh) logger = util.Logger(file_name =",
"encoding = \"latin1\") def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool",
"a given directory (non-recursive) and returns the next (increasing) run",
"= submit_config.run_dir_ignore, add_base_to_relative = True) files += submit_config.run_dir_extra_files files =",
"if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(),",
"load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding = \"latin1\")",
"SubmitConfig(util.EasyDict): # Strongly typed config dict needed to submit runs",
"the run dir. Assumes that the dir exists, is local,",
"path template and return either Windows or Linux formatted path",
"OS type to select either WINDOWS or LINUX WINDOWS =",
"rooted at submit_config.run_dir. If there's no # submit_config or run_dir,",
"for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile(\"^\\\\d+\")",
"platform import pprint import re import shutil import sys import",
"run dir, gather files related to the run, copy files",
"\"/\") return path def convert_path(path: str, path_type: PathType = PathType.AUTO)",
"command-line parameters submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm =",
"resides under the current submit run_dir # Args: # *paths:",
"elif platform.system() == \"Windows\": return os.getlogin() elif platform.system() == \"Linux\":",
"for single # thread operation) def __init__(self): self.data_reader_buffer_size = 1<<30",
"if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files = []",
"start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir",
"must be set to a non-zero value\") if submit_config.user_name is",
"is handled by run.sh) logger = util.Logger(file_name = None, should_flush",
"run_dir_root: Path to the run dir root. Can be optionally",
"str: # Convert a normal path back to its template",
"locally, redirect stderr to stdout, log stdout to a file,",
"# and network recompilation. import copy import inspect import os",
"\" + submit_config.task_name) # Farm specific preparations for a submit",
"path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return",
"creation of a new run directory # resume: resumes a",
"heuristics # Attributes: # data_reader_buffer_size: Used by DataReader to size",
"SubmitTarget.LOCAL # when running locally, redirect stderr to stdout, log",
"the run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new =",
"internal.local.TargetOptions() self.datasets = [] # (automatically populated) self.run_id = None",
"\"latin1\") def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False,",
"in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local: raise else:",
"to the # run dir. # run_id: Automatically populated value",
"during submit. Can be set by the user which will",
"files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True) files",
"resumption, load_config = True to load the prior submit_config file",
"open_file_or_url(file_or_url) as file: return pickle.load(file, encoding = \"latin1\") def submit_run(submit_config:",
"None self.run_name = None self.run_dir = None self.run_func_name = None",
"os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile(\"^\\\\d+\") # match one or more",
"either locally or in a computing cluster. # Compared to",
"passed to os.path.join # Returns: # A file/dirname rooted at",
"self.run_func_kwargs = None self.user_name = None self.task_name = None self.host_name",
"class def __init__(self): super().__init__() # run (set these) self.run_dir_root =",
"if \"submit_config\" in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs)",
"str, path_type: PathType = PathType.AUTO) -> str: # Replace tags",
"Path components to be passed to os.path.join # Returns: #",
"path_type: PathType = PathType.AUTO) -> str: # Convert a normal",
"an error, get out of the script now and signal",
"of worker processes to spawn (zero for single # thread",
"if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid task name. Probable reason:",
"with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding = \"latin1\") def",
"submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare submission",
"= None self.task_name = None self.host_name = \"localhost\" self.platform_extras =",
"self.task_name = None self.host_name = \"localhost\" self.platform_extras = PlatformExtras() def",
"Create a run dir, gather files related to the run,",
"a function to be run either locally or in a",
"= None, should_flush = True) import dnnlib dnnlib.submit_config = submit_config",
"old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"] _populate_run_dir(submit_config, host_run_dir) return",
"0 # single threaded default _user_name_override = None class SubmitConfig(util.EasyDict):",
"files += submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, \"src\", f[1])) for",
"\"localhost\" self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType =",
"convert it back to a normal path with given path",
"submit_target == SubmitTarget.LOCAL: farm = internal.local.Target() assert farm is not",
"PathType.AUTO) -> str: # Replace tags in the given path",
"_get_next_run_id_local(run_dir_root: str) -> int: # Reads all directory names in",
"None self.user_name = None self.task_name = None self.host_name = \"localhost\"",
"False try: print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time =",
"if load_config: config_file = os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file): old_submit_config =",
"directory names dir_names = [d for d in os.listdir(run_dir_root) if",
"inspect.signature(run_func_obj) if \"submit_config\" in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else:",
"submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info",
"elif platform.system() == \"Linux\": path_type = PathType.LINUX else: raise RuntimeError(\"Unknown",
"the creation of a new run directory # resume: resumes",
"in the run dir and task name # run_dir_ignore: List",
"import pickle import platform import pprint import re import shutil",
"= [] # submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus",
"print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if",
"run, copy files to the run dir, and launch the",
"shutil import sys import time import traceback from enum import",
"DataReader class def __init__(self): super().__init__() # run (set these) self.run_dir_root",
"A mixed bag of values used by dnnlib heuristics #",
"# Defer sys.exit(1) to happen after we close the logs",
"either WINDOWS or LINUX WINDOWS = 1 LINUX = 2",
"get_user_name()) # return correctly formatted path if path_type == PathType.WINDOWS:",
"#-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name =",
"during submit # run_func_name: Automatically populated value during submit #",
"should a path be formatted # WINDOWS: Format with Windows",
"IDs are numbers at the start of the directory names",
"whatever process that started this script. if exit_with_errcode: sys.exit(1) return",
"of a new run directory # resume: resumes a prior",
"this script. if exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url): if",
"Number of GPUs used/requested for the run # print_info: Whether",
"# run_dir: Automatically populated value during submit # run_func_name: Automatically",
"max(run_id, i + 1) return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir:",
"submit # user_name: Automatically populated value during submit. Can be",
"files to the run dir # run_dir_extra_files: List of (abs_path,",
"str: # Create a new run dir with increasing ID",
"= int(m.group()) run_id = max(run_id, i + 1) return run_id",
"stdout, log stdout to a file, and force flushing if",
"\"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer sys.exit(1)",
"value during submit # run_func_kwargs: Automatically populated value during submit",
"raise RuntimeError(\"Unknown platform\") path_template = path_template.replace(\"<USERNAME>\", get_user_name()) # return correctly",
"local.do_not_copy_source_files: Do not copy source files from the working directory",
"tags # Needs to always be run through get_path_from_template #",
"PathType.WINDOWS elif platform.system() == \"Linux\": path_type = PathType.LINUX else: raise",
"files] files += [(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files)",
"self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False self.nvprof",
"None if submit_target == SubmitTarget.LOCAL: farm = internal.local.Target() assert farm",
"= None logger.close() # If we hit an error, get",
"the error # to whatever process that started this script.",
"the newly provided # command-line arguments. if load_config: config_file =",
"override value global _user_name_override _user_name_override = name def get_user_name(): #",
"util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\") return open(file_or_url, \"rb\") def",
"\"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig) ->",
"to a normal path with given path type path_template =",
"path with given path type path_template = get_template_from_path(path) path =",
"in range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path,",
"dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]",
"= inspect.signature(run_func_obj) if \"submit_config\" in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs)",
"running in a cluster, redirect stderr to stdout, and just",
"raise RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths): # Make a path/filename that",
"that the dir exists, is local, and is writable pickle.dump(submit_config,",
"= 4, width = 200, compact = False) if (submit_config.submit_target",
"Task name must be accepted by the following regex: \"",
"during submit # run_func_kwargs: Automatically populated value during submit #",
"util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if \"submit_config\" in sig.parameters:",
"directory names in a given directory (non-recursive) and returns the",
"Copy all necessary files into the run dir. Assumes that",
"started this script. if exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url):",
"file: return pickle.load(file, encoding = \"latin1\") def submit_run(submit_config: SubmitConfig, run_func_name:",
"of values used by dnnlib heuristics # Attributes: # data_reader_buffer_size:",
"Attributes: # data_reader_buffer_size: Used by DataReader to size internal shared",
"dnnlib heuristics # Attributes: # data_reader_buffer_size: Used by DataReader to",
"name if _user_name_override is not None: return _user_name_override elif platform.system()",
"a path be formatted # WINDOWS: Format with Windows style",
"= submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name,",
"RuntimeError(\"submit_config.num_gpus must be set to a non-zero value\") if submit_config.user_name",
"not os.path.exists(run_dir_root): os.makedirs(run_dir_root) run_dir = os.path.join(run_dir_root, submit_config.run_name) if not resume:",
"extend the support for automatic training resumption, # and network",
"used in the run dir and task name # run_dir_ignore:",
"run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except:",
"is actually launched # num_gpus: Number of GPUs used/requested for",
"optionally templated with tags # Needs to always be run",
"the run dir and task name # run_dir_ignore: List of",
"submit_config.run_dir_ignore, add_base_to_relative = True) files += submit_config.run_dir_extra_files files = [(f[0],",
"bool = False, **run_func_kwargs) -> None: # Create a run",
"provided # command-line arguments. if load_config: config_file = os.path.join(host_run_dir, \"submit_config.pkl\")",
"during submit # run_name: Automatically populated value during submit #",
"os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst) # Defer sys.exit(1) to happen after",
"= util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(\".\")",
"set to a non-zero value\") if submit_config.user_name is None: submit_config.user_name",
"False, load_config: bool = False, **run_func_kwargs) -> None: # Create",
"rel_path) tuples of file paths. rel_path root will # be",
"resume, create_new = create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex",
"depending on running OS if path_type == PathType.AUTO: if platform.system()",
"self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\", \".vscode\",",
"r = re.compile(\"^\\\\d+\") # match one or more digits at",
"if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) ->",
"rather than the newly provided # command-line arguments. if load_config:",
"files when copying files to the run dir # run_dir_extra_files:",
"path_type = PathType.WINDOWS elif platform.system() == \"Linux\": path_type = PathType.LINUX",
"and create_new: raise RuntimeError(\"The run dir already exists! ({0})\".format(run_dir)) if",
"# run_func_kwargs: Automatically populated value during submit # user_name: Automatically",
"platform.system() == \"Linux\": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return",
"# AUTO: Use current OS type to select either WINDOWS",
"Farm specific preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir) # In",
"Format with Linux/Posix style # AUTO: Use current OS type",
"get_path_from_template self.run_desc = \"\" self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\",",
"= [] run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name) assert \".\" in submit_config.run_func_name for",
"Automatically populated value during submit # run_name: Automatically populated value",
"stdout to a file, and force flushing if is_local: logger",
"the run. Will be used in the run dir and",
"def make_run_dir_path(*paths): # Make a path/filename that resides under the",
"submit_config.submit_target == SubmitTarget.LOCAL # when running locally, redirect stderr to",
"from the directory # (so to maintain the original configuration",
"AUTO: Use current OS type to select either WINDOWS or",
"target enum value. Used to select where the run is",
"cache_dir = \".stylegan2-cache\") return open(file_or_url, \"rb\") def load_pkl(file_or_url): with open_file_or_url(file_or_url)",
"components to be passed to os.path.join # Returns: # A",
"or (submit_config.num_gpus == 0): raise RuntimeError(\"submit_config.num_gpus must be set to",
"= os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush = True) else: # when",
"run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj) if \"submit_config\"",
"PathType.AUTO: if platform.system() == \"Windows\": path_type = PathType.WINDOWS elif platform.system()",
"typing, etc is_local = submit_config.submit_target == SubmitTarget.LOCAL # when running",
"files to the run dir, and launch the run in",
"formatted # WINDOWS: Format with Windows style # LINUX: Format",
"# In case of resumption, load_config = True to load",
"Use current OS type to select either WINDOWS or LINUX",
"used to ignore files when copying files to the run",
"place. # create_newdir: enforces the creation of a new run",
"value # task_name: Automatically populated value during submit # host_name:",
"raise RuntimeError(\"The run dir already exists! ({0})\".format(run_dir)) if not os.path.exists(run_dir):",
"raise RuntimeError(\"Invalid task name. Probable reason: unacceptable characters in your",
"= False) if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files: return files",
"during submit. Used by various dnnlib libraries # such as",
"not copy source files from the working directory to the",
"200, compact = False) if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:",
"dir_names: m = r.match(dir_name) if m is not None: i",
"indent = 4, width = 200, compact = False) if",
"= 2 AUTO = 3 class PlatformExtras: # A mixed",
"as f: pprint.pprint(submit_config, stream = f, indent = 4, width",
"If there's no # submit_config or run_dir, the base directory",
"populated value during submit # host_name: Automatically populated value during",
"PathType = PathType.AUTO) -> str: # Replace tags in the",
"[d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r =",
"style # LINUX: Format with Linux/Posix style # AUTO: Use",
"all necessary files into the run dir. Assumes that the",
"be formatted # WINDOWS: Format with Windows style # LINUX:",
"of the run. Will be used in the run dir",
"ignores = submit_config.run_dir_ignore, add_base_to_relative = True) files += submit_config.run_dir_extra_files files",
"instead of using the current command-line parameters submit_config = copy.deepcopy(submit_config)",
"shutil.copyfile(log_src, log_dst) # Defer sys.exit(1) to happen after we close",
"def get_user_name(): # Get the current user name if _user_name_override",
"thread operation) def __init__(self): self.data_reader_buffer_size = 1<<30 # 1 GB",
"one or more digits at the start of the string",
"bool = False, resume: bool = False, load_config: bool =",
"<reponame>gperdrizet/gansformer # Submit a function to be run either locally",
"from the working directory to the # run dir. #",
"\"_cudacache\"] self.run_dir_extra_files = [] # submit (set these) self.submit_target =",
"submission by populating the run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config,",
"stderr to stdout, and just force flushing (log writing is",
"List of file patterns used to ignore files when copying",
"got \" + submit_config.task_name) # Farm specific preparations for a",
"to submit runs # Attributes: # run_dir_root: Path to the",
"function should be run # LOCAL: Run it locally LOCAL",
"LOCAL = 1 class PathType(Enum): # Determines in which format",
"run dir already exists! ({0})\".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return",
"try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\" else: raise",
"user name if _user_name_override is not None: return _user_name_override elif",
"accepted by the following regex: \" + docker_valid_name_regex + \",",
"util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True)",
"cluster. # Compared to original StyleGAN implementation, we extend the",
"the current user name if _user_name_override is not None: return",
"# submit_config or run_dir, the base directory is the current",
"we hit an error, get out of the script now",
"in a given directory (non-recursive) and returns the next (increasing)",
"submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by populating the",
"return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume:",
"# (automatically populated) self.run_id = None self.run_name = None self.run_dir",
"= None if submit_target == SubmitTarget.LOCAL: farm = internal.local.Target() assert",
"of GPUs used/requested for the run # print_info: Whether to",
"prior submit_config file from the directory # (so to maintain",
"Can be optionally templated with tags # Needs to always",
"stderr to stdout, log stdout to a file, and force",
"True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close()",
"run dir # run_dir_extra_files: List of (abs_path, rel_path) tuples of",
"experiment config instead of using the current command-line parameters submit_config",
"_idx in range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files +=",
"id # Assumes IDs are numbers at the start of",
"path def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:",
"\".stylegan2-cache\") return open(file_or_url, \"rb\") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file:",
"# should always be passed through get_path_from_template self.run_desc = \"\"",
"= PathType.AUTO) -> str: # Convert a normal path to",
"import traceback from enum import Enum from .. import util",
"= \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid task name.",
"(zero for single # thread operation) def __init__(self): self.data_reader_buffer_size =",
"None) or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir,",
"resume = True, load prior experiment config instead of using",
"True, load prior experiment config instead of using the current",
"back to a normal path with given path type path_template",
"= False, **run_func_kwargs) -> None: # Create a run dir,",
"# Set the global username override value global _user_name_override _user_name_override",
"the run dir # submit_target: Submit target enum value. Used",
"= os.path.join(run_dir_root, submit_config.run_name) if not resume: if os.path.exists(run_dir) and create_new:",
"= True to load the prior submit_config file from the",
"is local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with",
"get out of the script now and signal the error",
"default _user_name_override = None class SubmitConfig(util.EasyDict): # Strongly typed config",
"path type depending on running OS if path_type == PathType.AUTO:",
"# A mixed bag of values used by dnnlib heuristics",
"mixed bag of values used by dnnlib heuristics # Attributes:",
"AUTO = 3 class PlatformExtras: # A mixed bag of",
"= 0 # single threaded default _user_name_override = None class",
"spawn (zero for single # thread operation) def __init__(self): self.data_reader_buffer_size",
"= run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by populating the run",
"global username override value global _user_name_override _user_name_override = name def",
"its template representation path = path.replace(\"\\\\\", \"/\") return path def",
"populated value during submit # user_name: Automatically populated value during",
"os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) -> int: # Reads",
"SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False self.nvprof = False",
"root. Can be optionally templated with tags # Needs to",
"using the current command-line parameters submit_config = copy.deepcopy(submit_config) submit_target =",
"Windows or Linux formatted path # automatically select path type",
"submit. Used by various dnnlib libraries # such as the",
"= copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm = None if submit_target",
"run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None: # Copy",
"-> str: # Create a new run dir with increasing",
"= True, load prior experiment config instead of using the",
"import dnnlib if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):",
"\"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"] _populate_run_dir(submit_config,",
"import copy import inspect import os import pathlib import pickle",
"template and return either Windows or Linux formatted path #",
"re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid task name. Probable reason: unacceptable characters",
"names in a given directory (non-recursive) and returns the next",
"be optionally templated with tags # Needs to always be",
"# create_newdir: enforces the creation of a new run directory",
"run in appropriate place. # create_newdir: enforces the creation of",
"[(os.path.join(dnnlib_module_dir_path, \"submission\", \"internal\", \"run.py\"), os.path.join(run_dir, \"run.py\"))] util.copy_files_and_create_dirs(files) def run_wrapper(submit_config: SubmitConfig)",
"on running OS if path_type == PathType.AUTO: if platform.system() ==",
"= False, resume: bool = False, load_config: bool = False,",
"submit_config.task_name): raise RuntimeError(\"Invalid task name. Probable reason: unacceptable characters in",
"= 1 LINUX = 2 AUTO = 3 class PlatformExtras:",
"def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str: #",
"== \"Windows\": return os.getlogin() elif platform.system() == \"Linux\": try: import",
"be set by the user which will then # override",
"internal shared memory buffers # data_reader_process_count: Number of worker processes",
"# A file/dirname rooted at submit_config.run_dir. If there's no #",
"populated value during submit # run_func_name: Automatically populated value during",
"copy import inspect import os import pathlib import pickle import",
"start of the directory names dir_names = [d for d",
"PlatformExtras: # A mixed bag of values used by dnnlib",
"self.run_id = None self.run_name = None self.run_dir = None self.run_func_name",
"Automatically populated value during submit # host_name: Automatically populated value",
"open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f: pprint.pprint(submit_config, stream = f, indent",
"enforces the creation of a new run directory # resume:",
"else: raise RuntimeError(\"Unknown platform\") def get_template_from_path(path: str) -> str: #",
"config dict needed to submit runs # Attributes: # run_dir_root:",
"it locally LOCAL = 1 class PathType(Enum): # Determines in",
"Convert a normal path back to its template representation path",
"+= submit_config.run_dir_extra_files files = [(f[0], os.path.join(run_dir, \"src\", f[1])) for f",
"Whether to print debug information when submitting # local.do_not_copy_source_files: Do",
"# platform_extras: Automatically populated values during submit. Used by various",
"is the current # working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))`",
"be run # LOCAL: Run it locally LOCAL = 1",
"populated value during submit # platform_extras: Automatically populated values during",
"path.replace(\"\\\\\", \"/\") return path def convert_path(path: str, path_type: PathType =",
"# WINDOWS: Format with Windows style # LINUX: Format with",
"str) -> str: # Convert a normal path back to",
"the experiment rather than the newly provided # command-line arguments.",
"os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) -> int: #",
"SubmitConfig, run_func_name: str, create_newdir: bool = False, resume: bool =",
"copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm = None if submit_target ==",
"self.run_desc = \"\" self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\",",
"callable(run_func_obj) sig = inspect.signature(run_func_obj) if \"submit_config\" in sig.parameters: run_func_obj(submit_config =",
"import util from ..util import EasyDict from . import internal",
"(set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info =",
"submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex, submit_config.task_name): raise",
"file from the directory # (so to maintain the original",
"flushing (log writing is handled by run.sh) logger = util.Logger(file_name",
"= internal.local.TargetOptions() self.datasets = [] # (automatically populated) self.run_id =",
"automatic value # task_name: Automatically populated value during submit #",
"\"Linux\": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\" else:",
"= time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig = inspect.signature(run_func_obj)",
"str, create_newdir: bool = False, resume: bool = False, load_config:",
"get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare",
"automatically select path type depending on running OS if path_type",
"during submit # platform_extras: Automatically populated values during submit. Used",
"to a non-zero value\") if submit_config.user_name is None: submit_config.user_name =",
"to spawn (zero for single # thread operation) def __init__(self):",
"self.run_dir = None self.run_func_name = None self.run_func_kwargs = None self.user_name",
"that started this script. if exit_with_errcode: sys.exit(1) return submit_config def",
"script. if exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url):",
"Strongly typed config dict needed to submit runs # Attributes:",
"Disallow submitting jobs with zero num_gpus if (submit_config.num_gpus is None)",
"not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root: str) -> int:",
"return pickle.load(file, encoding = \"latin1\") def submit_run(submit_config: SubmitConfig, run_func_name: str,",
"# run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths.",
"the start of the directory names dir_names = [d for",
"format should a path be formatted # WINDOWS: Format with",
"str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown",
"experiment rather than the newly provided # command-line arguments. if",
"[] # submit (set these) self.submit_target = SubmitTarget.LOCAL self.num_gpus =",
"or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths)",
"= f, indent = 4, width = 200, compact =",
"exists! ({0})\".format(run_dir)) if not os.path.exists(run_dir): os.makedirs(run_dir) return run_dir def _get_next_run_id_local(run_dir_root:",
"in dir_names: m = r.match(dir_name) if m is not None:",
"width = 200, compact = False) if (submit_config.submit_target == SubmitTarget.LOCAL)",
"be used in the run dir and task name #",
"resume: if os.path.exists(run_dir) and create_new: raise RuntimeError(\"The run dir already",
"either Windows or Linux formatted path # automatically select path",
"if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0): raise RuntimeError(\"submit_config.num_gpus",
"script now and signal the error # to whatever process",
"handled by run.sh) logger = util.Logger(file_name = None, should_flush =",
"\".idea\", \".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files = [] # submit (set",
"i = int(m.group()) run_id = max(run_id, i + 1) return",
"False self.local = internal.local.TargetOptions() self.datasets = [] # (automatically populated)",
"with Linux/Posix style # AUTO: Use current OS type to",
"the function should be run # LOCAL: Run it locally",
"create_newdir: bool = False, resume: bool = False, load_config: bool",
"through get_path_from_template # run_desc: Description of the run. Will be",
"operation) def __init__(self): self.data_reader_buffer_size = 1<<30 # 1 GB self.data_reader_process_count",
"run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by populating",
"logs and create a _finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir,",
"to original StyleGAN implementation, we extend the support for automatic",
"__init__(self): self.data_reader_buffer_size = 1<<30 # 1 GB self.data_reader_process_count = 0",
"= submit_config exit_with_errcode = False try: print(\"dnnlib: Running {0}() on",
"\"*.suo\", \".cache\", \".idea\", \".vs\", \".vscode\", \"_cudacache\"] self.run_dir_extra_files = [] #",
"submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not",
"path to template and the convert it back to a",
"the following regex: \" + docker_valid_name_regex + \", got \"",
"import platform import pprint import re import shutil import sys",
"populated) self.run_id = None self.run_name = None self.run_dir = None",
"shared memory buffers # data_reader_process_count: Number of worker processes to",
"typed config dict needed to submit runs # Attributes: #",
"os import pathlib import pickle import platform import pprint import",
"1 self.print_info = False self.nvprof = False self.local = internal.local.TargetOptions()",
"and network recompilation. import copy import inspect import os import",
"return run_dir def _get_next_run_id_local(run_dir_root: str) -> int: # Reads all",
"run_dir def _get_next_run_id_local(run_dir_root: str) -> int: # Reads all directory",
"for the run # print_info: Whether to print debug information",
"submit # run_name: Automatically populated value during submit # run_dir:",
"exit_with_errcode: sys.exit(1) return submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url,",
"Enum from .. import util from ..util import EasyDict from",
"current OS type to select either WINDOWS or LINUX WINDOWS",
"= submit_config.submit_target farm = None if submit_target == SubmitTarget.LOCAL: farm",
"create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if",
"super().__init__() # run (set these) self.run_dir_root = \"\" # should",
"dnnlib dnnlib.submit_config = submit_config exit_with_errcode = False try: print(\"dnnlib: Running",
"and signal the error # to whatever process that started",
"== SubmitTarget.LOCAL # when running locally, redirect stderr to stdout,",
"submit runs # Attributes: # run_dir_root: Path to the run",
"name # run_dir_ignore: List of file patterns used to ignore",
"= r.match(dir_name) if m is not None: i = int(m.group())",
"should_flush = True) import dnnlib dnnlib.submit_config = submit_config exit_with_errcode =",
"path_type = PathType.LINUX else: raise RuntimeError(\"Unknown platform\") path_template = path_template.replace(\"<USERNAME>\",",
"user which will then # override the automatic value #",
"exists, is local, and is writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\"))",
"we close the logs and create a _finished.txt exit_with_errcode =",
"writable pickle.dump(submit_config, open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as",
"always be run through get_path_from_template # run_desc: Description of the",
"the user which will then # override the automatic value",
"directory (non-recursive) and returns the next (increasing) run id #",
"etc is_local = submit_config.submit_target == SubmitTarget.LOCAL # when running locally,",
"Automatically populated value during submit. Can be set by the",
"resumption, # and network recompilation. import copy import inspect import",
"a _finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close()",
"PathType.AUTO) -> str: # Convert a normal path to template",
"network recompilation. import copy import inspect import os import pathlib",
"these) self.submit_target = SubmitTarget.LOCAL self.num_gpus = 1 self.print_info = False",
"inside the run dir # submit_target: Submit target enum value.",
"= submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files +=",
"import time import traceback from enum import Enum from ..",
"# override the automatic value # task_name: Automatically populated value",
"path_type) return path def set_user_name_override(name: str) -> None: # Set",
"with given path type path_template = get_template_from_path(path) path = get_path_from_template(path_template,",
"the run is actually launched # num_gpus: Number of GPUs",
"host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir) submit_config.task_name = \"{}-{:05d}-{}\".format(submit_config.user_name,",
"load_pkl(config_file) submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if \"resume_pkl\" in",
"open(os.path.join(run_dir, \"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f: pprint.pprint(submit_config,",
"import internal class SubmitTarget(Enum): # The target where the function",
"-> str: # Replace tags in the given path template",
"util.format_time(time.time() - start_time))) except: if is_local: raise else: traceback.print_exc() log_src",
"WINDOWS = 1 LINUX = 2 AUTO = 3 class",
"for dir_name in dir_names: m = r.match(dir_name) if m is",
"# The target where the function should be run #",
"submitting # local.do_not_copy_source_files: Do not copy source files from the",
"None: i = int(m.group()) run_id = max(run_id, i + 1)",
"if is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir, \"log.txt\"), file_mode=\"a\", should_flush",
"= max(run_id, i + 1) return run_id def _populate_run_dir(submit_config: SubmitConfig,",
"various dnnlib libraries # such as the DataReader class def",
"original configuration of the experiment rather than the newly provided",
"run_id: Automatically populated value during submit # run_name: Automatically populated",
"value during submit # run_dir: Automatically populated value during submit",
"or more digits at the start of the string run_id",
"to be passed to os.path.join # Returns: # A file/dirname",
"file, and force flushing if is_local: logger = util.Logger(file_name =",
"+= util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False) dnnlib_module_dir_path =",
"1<<30 # 1 GB self.data_reader_process_count = 0 # single threaded",
"assert \".\" in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(\".\") - 1):",
"re import shutil import sys import time import traceback from",
"working directory to the # run dir. # run_id: Automatically",
"+ \", got \" + submit_config.task_name) # Farm specific preparations",
"return open(file_or_url, \"rb\") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return",
"which format should a path be formatted # WINDOWS: Format",
"os.getlogin() elif platform.system() == \"Linux\": try: import pwd return pwd.getpwuid(os.geteuid()).pw_name",
"working directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib if (dnnlib.submit_config",
"StyleGAN implementation, we extend the support for automatic training resumption,",
"paths. rel_path root will # be the src directory inside",
"ID number at the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if",
"a computing cluster. # Compared to original StyleGAN implementation, we",
"# resume: resumes a prior experiment using its existing run",
"internal.local.Target() assert farm is not None # unknown target #",
"pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\" else: raise RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths):",
"always be passed through get_path_from_template self.run_desc = \"\" self.run_dir_ignore =",
"os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile(\"^\\\\d+\") # match one",
"is not None # unknown target # Disallow submitting jobs",
"a new run directory # resume: resumes a prior experiment",
"def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None: # Copy all",
"A file/dirname rooted at submit_config.run_dir. If there's no # submit_config",
"-> None: # Create a run dir, gather files related",
"None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs = run_func_kwargs",
"self.data_reader_buffer_size = 1<<30 # 1 GB self.data_reader_process_count = 0 #",
"host_name: Automatically populated value during submit # platform_extras: Automatically populated",
"in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r = re.compile(\"^\\\\d+\") # match",
"at submit_config.run_dir. If there's no # submit_config or run_dir, the",
"= 200, compact = False) if (submit_config.submit_target == SubmitTarget.LOCAL) and",
"# local.do_not_copy_source_files: Do not copy source files from the working",
"are numbers at the start of the directory names dir_names",
"-> int: # Reads all directory names in a given",
"regex: \" + docker_valid_name_regex + \", got \" + submit_config.task_name)",
"# be the src directory inside the run dir #",
"submit_config def open_file_or_url(file_or_url): if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\")",
"path back to its template representation path = path.replace(\"\\\\\", \"/\")",
"= False, load_config: bool = False, **run_func_kwargs) -> None: #",
"run_dir = os.path.join(run_dir_root, submit_config.run_name) if not resume: if os.path.exists(run_dir) and",
"the string run_id = 0 for dir_name in dir_names: m",
"log stdout to a file, and force flushing if is_local:",
"{0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local: raise",
"after we close the logs and create a _finished.txt exit_with_errcode",
"a new run dir with increasing ID number at the",
"old_submit_config[\"run_name\"] if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]: submit_config[\"run_func_kwargs\"][\"resume_pkl\"] = old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] =",
"run function call for handling logging, exceptions, typing, etc is_local",
"PathType = PathType.AUTO) -> str: # Convert a normal path",
"None: # Create a run dir, gather files related to",
"is None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config:",
"= PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) ->",
"in appropriate place. # create_newdir: enforces the creation of a",
"Args: # *paths: Path components to be passed to os.path.join",
"submit # run_dir: Automatically populated value during submit # run_func_name:",
"used by dnnlib heuristics # Attributes: # data_reader_buffer_size: Used by",
"be the src directory inside the run dir # submit_target:",
"return run_id def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None: #",
"by the user which will then # override the automatic",
"a prior experiment using its existing run directory # load_config:",
"== \"Windows\": path_type = PathType.WINDOWS elif platform.system() == \"Linux\": path_type",
"in a cluster, redirect stderr to stdout, and just force",
"and create a _finished.txt exit_with_errcode = True finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"),",
"SubmitConfig, resume: bool, create_new: str) -> str: # Create a",
"of file paths. rel_path root will # be the src",
"= [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))] r",
"\"submit_config.pkl\") if os.path.exists(config_file): old_submit_config = submit_config submit_config = load_pkl(config_file) submit_config[\"run_id\"]",
".. import util from ..util import EasyDict from . import",
"Create a new run dir with increasing ID number at",
"# load_config: in case resume = True, load prior experiment",
"= old_submit_config[\"run_func_kwargs\"][\"resume_pkl\"] submit_config[\"run_func_kwargs\"][\"resume_kimg\"] = old_submit_config[\"run_func_kwargs\"][\"resume_kimg\"] _populate_run_dir(submit_config, host_run_dir) return farm.submit(submit_config, host_run_dir)",
"submit_config.task_name) # Farm specific preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir)",
"str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown platform\") def get_template_from_path(path: str) -> str:",
"# Create a run dir, gather files related to the",
"by the following regex: \" + docker_valid_name_regex + \", got",
"dir # run_dir_extra_files: List of (abs_path, rel_path) tuples of file",
"stdout, and just force flushing (log writing is handled by",
"def _get_next_run_id_local(run_dir_root: str) -> int: # Reads all directory names",
"import shutil import sys import time import traceback from enum",
"Submit a function to be run either locally or in",
"redirect stderr to stdout, log stdout to a file, and",
"str) -> int: # Reads all directory names in a",
"run is actually launched # num_gpus: Number of GPUs used/requested",
"not None: return _user_name_override elif platform.system() == \"Windows\": return os.getlogin()",
"= os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file): old_submit_config = submit_config submit_config =",
"finally: open(os.path.join(submit_config.run_dir, \"_finished.txt\"), \"w\").close() dnnlib.RunContext.get().close() dnnlib.submit_config = None logger.close() #",
"os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool,",
"or Linux formatted path # automatically select path type depending",
"populated value during submit. Can be set by the user",
"next (increasing) run id # Assumes IDs are numbers at",
"name must be accepted by the following regex: \" +",
"class SubmitTarget(Enum): # The target where the function should be",
"# print_info: Whether to print debug information when submitting #",
"file/dirname rooted at submit_config.run_dir. If there's no # submit_config or",
"Make a path/filename that resides under the current submit run_dir",
"def run_wrapper(submit_config: SubmitConfig) -> None: # Wrap the actual run",
"submit_config.host_name)) start_time = time.time() run_func_obj = util.get_obj_by_name(submit_config.run_func_name) assert callable(run_func_obj) sig",
"= run_func_name submit_config.run_func_kwargs = run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by",
"path def set_user_name_override(name: str) -> None: # Set the global",
"to select either WINDOWS or LINUX WINDOWS = 1 LINUX",
"run dir. # run_id: Automatically populated value during submit #",
"load_config: bool = False, **run_func_kwargs) -> None: # Create a",
"data_reader_buffer_size: Used by DataReader to size internal shared memory buffers",
"of using the current command-line parameters submit_config = copy.deepcopy(submit_config) submit_target",
"the run, copy files to the run dir, and launch",
"function to be run either locally or in a computing",
"the automatic value # task_name: Automatically populated value during submit",
"def load_pkl(file_or_url): with open_file_or_url(file_or_url) as file: return pickle.load(file, encoding =",
"\"submit_config\" in sig.parameters: run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib:",
"task name # run_dir_ignore: List of file patterns used to",
"directory # (so to maintain the original configuration of the",
"task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task",
"return str(pathlib.PurePosixPath(path_template)) else: raise RuntimeError(\"Unknown platform\") def get_template_from_path(path: str) ->",
"# 1 GB self.data_reader_process_count = 0 # single threaded default",
"libraries # such as the DataReader class def __init__(self): super().__init__()",
"# data_reader_buffer_size: Used by DataReader to size internal shared memory",
"dir. Assumes that the dir exists, is local, and is",
"copy files to the run dir, and launch the run",
"submit_config, **submit_config.run_func_kwargs) else: run_func_obj(**submit_config.run_func_kwargs) print(\"dnnlib: Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time()",
"dnnlib libraries # such as the DataReader class def __init__(self):",
"characters in your submit_config.run_desc. Task name must be accepted by",
"value during submit # run_func_name: Automatically populated value during submit",
"the # run dir. # run_id: Automatically populated value during",
"2 AUTO = 3 class PlatformExtras: # A mixed bag",
"value during submit # host_name: Automatically populated value during submit",
"values used by dnnlib heuristics # Attributes: # data_reader_buffer_size: Used",
"where the function should be run # LOCAL: Run it",
"from .. import util from ..util import EasyDict from .",
"is None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs =",
"files = [(f[0], os.path.join(run_dir, \"src\", f[1])) for f in files]",
"(automatically populated) self.run_id = None self.run_name = None self.run_dir =",
"if platform.system() == \"Windows\": path_type = PathType.WINDOWS elif platform.system() ==",
"not resume: if os.path.exists(run_dir) and create_new: raise RuntimeError(\"The run dir",
"the next (increasing) run id # Assumes IDs are numbers",
"True) else: # when running in a cluster, redirect stderr",
"source files from the working directory to the # run",
"(set these) self.run_dir_root = \"\" # should always be passed",
"The target where the function should be run # LOCAL:",
"return os.getlogin() elif platform.system() == \"Linux\": try: import pwd return",
"load prior experiment config instead of using the current command-line",
"will # be the src directory inside the run dir",
"return path def convert_path(path: str, path_type: PathType = PathType.AUTO) ->",
"processes to spawn (zero for single # thread operation) def",
"1): run_func_module_dir_path = os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore,",
"log_src = os.path.join(submit_config.run_dir, \"log.txt\") log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), \"{0}-error.txt\".format(submit_config.run_name)) shutil.copyfile(log_src, log_dst)",
"submit_target = submit_config.submit_target farm = None if submit_target == SubmitTarget.LOCAL:",
"Description of the run. Will be used in the run",
"None self.run_func_name = None self.run_func_kwargs = None self.user_name = None",
"new run dir with increasing ID number at the start",
"your submit_config.run_desc. Task name must be accepted by the following",
"jobs with zero num_gpus if (submit_config.num_gpus is None) or (submit_config.num_gpus",
"specific preparations for a submit farm.finalize_submit_config(submit_config, host_run_dir) # In case",
"config_file = os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file): old_submit_config = submit_config submit_config",
"run_dir_ignore: List of file patterns used to ignore files when",
"redirect stderr to stdout, and just force flushing (log writing",
"-> None: # Wrap the actual run function call for",
"(dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None): return os.path.join(os.getcwd(), *paths)",
"self.platform_extras = PlatformExtras() def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO)",
"except: if is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, \"log.txt\")",
"to stdout, log stdout to a file, and force flushing",
"in submit_config.run_func_name for _idx in range(submit_config.run_func_name.count(\".\") - 1): run_func_module_dir_path =",
"None): return os.path.join(os.getcwd(), *paths) return os.path.join(dnnlib.submit_config.run_dir, *paths) def _create_run_dir_local(submit_config: SubmitConfig,",
"Automatically populated value during submit # run_func_name: Automatically populated value",
"during submit # run_dir: Automatically populated value during submit #",
"directory. # E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib if (dnnlib.submit_config is",
"1 LINUX = 2 AUTO = 3 class PlatformExtras: #",
"by various dnnlib libraries # such as the DataReader class",
"\"\" self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\", \".vs\",",
"r.match(dir_name) if m is not None: i = int(m.group()) run_id",
"import pathlib import pickle import platform import pprint import re",
"launch the run in appropriate place. # create_newdir: enforces the",
"maintain the original configuration of the experiment rather than the",
"RuntimeError(\"Unknown platform\") path_template = path_template.replace(\"<USERNAME>\", get_user_name()) # return correctly formatted",
"pprint.pprint(submit_config, stream = f, indent = 4, width = 200,",
"load_config: config_file = os.path.join(host_run_dir, \"submit_config.pkl\") if os.path.exists(config_file): old_submit_config = submit_config",
"and force flushing if is_local: logger = util.Logger(file_name = os.path.join(submit_config.run_dir,",
"E.g., `os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib if (dnnlib.submit_config is None) or",
"such as the DataReader class def __init__(self): super().__init__() # run",
"and return either Windows or Linux formatted path # automatically",
"-> None: # Copy all necessary files into the run",
"__init__(self): super().__init__() # run (set these) self.run_dir_root = \"\" #",
"memory buffers # data_reader_process_count: Number of worker processes to spawn",
"submit_config[\"run_id\"] = old_submit_config[\"run_id\"] submit_config[\"run_name\"] = old_submit_config[\"run_name\"] if \"resume_pkl\" in old_submit_config[\"run_func_kwargs\"]:",
"# when running locally, redirect stderr to stdout, log stdout",
"run_func_kwargs: Automatically populated value during submit # user_name: Automatically populated",
"resume: bool = False, load_config: bool = False, **run_func_kwargs) ->",
"== 0): raise RuntimeError(\"submit_config.num_gpus must be set to a non-zero",
"run_func_kwargs #-------------------------------------------------------------------- # Prepare submission by populating the run dir",
"\"submit_config.pkl\"), \"wb\")) with open(os.path.join(run_dir, \"submit_config.txt\"), \"w\") as f: pprint.pprint(submit_config, stream",
"directory # resume: resumes a prior experiment using its existing",
"and launch the run in appropriate place. # create_newdir: enforces",
"passed through get_path_from_template self.run_desc = \"\" self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\",",
"returns the next (increasing) run id # Assumes IDs are",
"Will be used in the run dir and task name",
"True) import dnnlib dnnlib.submit_config = submit_config exit_with_errcode = False try:",
"else: raise RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths): # Make a path/filename",
"sys.exit(1) to happen after we close the logs and create",
"dir, gather files related to the run, copy files to",
"return pwd.getpwuid(os.geteuid()).pw_name except: return \"unknown\" else: raise RuntimeError(\"Unknown platform\") def",
"Finished {0}() in {1}.\".format(submit_config.run_func_name, util.format_time(time.time() - start_time))) except: if is_local:",
"style # AUTO: Use current OS type to select either",
"Windows style # LINUX: Format with Linux/Posix style # AUTO:",
"import Enum from .. import util from ..util import EasyDict",
"the directory names dir_names = [d for d in os.listdir(run_dir_root)",
"locally LOCAL = 1 class PathType(Enum): # Determines in which",
"run_desc: Description of the run. Will be used in the",
"for handling logging, exceptions, typing, etc is_local = submit_config.submit_target ==",
"to ignore files when copying files to the run dir",
"path # automatically select path type depending on running OS",
"the run # print_info: Whether to print debug information when",
"RuntimeError(\"Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc.",
"\"w\") as f: pprint.pprint(submit_config, stream = f, indent = 4,",
"the start of the string run_id = 0 for dir_name",
"actually launched # num_gpus: Number of GPUs used/requested for the",
"print(\"dnnlib: Running {0}() on {1}...\".format(submit_config.run_func_name, submit_config.host_name)) start_time = time.time() run_func_obj",
"submit # host_name: Automatically populated value during submit # platform_extras:",
"Defer sys.exit(1) to happen after we close the logs and",
"dir, and launch the run in appropriate place. # create_newdir:",
"(submit_config.num_gpus == 0): raise RuntimeError(\"submit_config.num_gpus must be set to a",
"\"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid task name. Probable",
"automatic training resumption, # and network recompilation. import copy import",
"# run dir. # run_id: Automatically populated value during submit",
"`os.path.join(dnnlib.submit_config.run_dir, \"output.txt\"))` import dnnlib if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir",
"data_reader_process_count: Number of worker processes to spawn (zero for single",
"make_run_dir_path(*paths): # Make a path/filename that resides under the current",
"needed to submit runs # Attributes: # run_dir_root: Path to",
"dnnlib_module_dir_path = util.get_module_dir_by_obj_name(\"dnnlib\") files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative",
"# If we hit an error, get out of the",
"single threaded default _user_name_override = None class SubmitConfig(util.EasyDict): # Strongly",
"set by the user which will then # override the",
"if path_type == PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX:",
"normal path with given path type path_template = get_template_from_path(path) path",
"if util.is_url(file_or_url): return util.open_url(file_or_url, cache_dir = \".stylegan2-cache\") return open(file_or_url, \"rb\")",
"# Submit a function to be run either locally or",
"directory # load_config: in case resume = True, load prior",
"should be run # LOCAL: Run it locally LOCAL =",
"submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name = run_func_name submit_config.run_func_kwargs",
"log_dst) # Defer sys.exit(1) to happen after we close the",
"# Convert a normal path back to its template representation",
"*paths) def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str:",
"_create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: str) -> str: # Create",
"Used by DataReader to size internal shared memory buffers #",
"pickle.load(file, encoding = \"latin1\") def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir:",
"at the start run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO) if not os.path.exists(run_dir_root):",
"is_local: raise else: traceback.print_exc() log_src = os.path.join(submit_config.run_dir, \"log.txt\") log_dst =",
"None: # Set the global username override value global _user_name_override",
"Automatically populated value during submit # run_dir: Automatically populated value",
"create_new: str) -> str: # Create a new run dir",
"newly provided # command-line arguments. if load_config: config_file = os.path.join(host_run_dir,",
"_user_name_override = name def get_user_name(): # Get the current user",
"by dnnlib heuristics # Attributes: # data_reader_buffer_size: Used by DataReader",
"else: # when running in a cluster, redirect stderr to",
"with increasing ID number at the start run_dir_root = get_path_from_template(submit_config.run_dir_root,",
"populating the run dir #-------------------------------------------------------------------- host_run_dir = _create_run_dir_local(submit_config, resume, create_new",
"to a file, and force flushing if is_local: logger =",
"run dir and task name # run_dir_ignore: List of file",
"# run_id: Automatically populated value during submit # run_name: Automatically",
"recompilation. import copy import inspect import os import pathlib import",
"it back to a normal path with given path type",
"= internal.local.Target() assert farm is not None # unknown target",
"+ submit_config.task_name) # Farm specific preparations for a submit farm.finalize_submit_config(submit_config,",
"there's no # submit_config or run_dir, the base directory is",
"# unknown target # Disallow submitting jobs with zero num_gpus",
"GPUs used/requested for the run # print_info: Whether to print",
"= \"\" self.run_dir_ignore = [\"__pycache__\", \"*.pyproj\", \"*.sln\", \"*.suo\", \".cache\", \".idea\",",
"path = path.replace(\"\\\\\", \"/\") return path def convert_path(path: str, path_type:",
"== PathType.WINDOWS: return str(pathlib.PureWindowsPath(path_template)) elif path_type == PathType.LINUX: return str(pathlib.PurePosixPath(path_template))",
"than the newly provided # command-line arguments. if load_config: config_file",
"# Determines in which format should a path be formatted",
"under the current submit run_dir # Args: # *paths: Path",
"== \"Linux\": path_type = PathType.LINUX else: raise RuntimeError(\"Unknown platform\") path_template",
"for a submit farm.finalize_submit_config(submit_config, host_run_dir) # In case of resumption,",
"os.path.dirname(run_func_module_dir_path) files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False)",
"path_type == PathType.AUTO: if platform.system() == \"Windows\": path_type = PathType.WINDOWS",
"of file patterns used to ignore files when copying files",
"computing cluster. # Compared to original StyleGAN implementation, we extend",
"to its template representation path = path.replace(\"\\\\\", \"/\") return path",
"= \".stylegan2-cache\") return open(file_or_url, \"rb\") def load_pkl(file_or_url): with open_file_or_url(file_or_url) as",
"return _user_name_override elif platform.system() == \"Windows\": return os.getlogin() elif platform.system()",
"self.datasets = [] # (automatically populated) self.run_id = None self.run_name",
"def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False, resume:",
"1 GB self.data_reader_process_count = 0 # single threaded default _user_name_override",
"parameters submit_config = copy.deepcopy(submit_config) submit_target = submit_config.submit_target farm = None",
"# automatically select path type depending on running OS if",
"file patterns used to ignore files when copying files to",
"support for automatic training resumption, # and network recompilation. import",
"import EasyDict from . import internal class SubmitTarget(Enum): # The",
"run_id = 0 for dir_name in dir_names: m = r.match(dir_name)",
"root will # be the src directory inside the run",
"and returns the next (increasing) run id # Assumes IDs",
"should_flush = True) else: # when running in a cluster,",
"os.path.exists(run_dir) and create_new: raise RuntimeError(\"The run dir already exists! ({0})\".format(run_dir))",
"docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid task",
"the directory # (so to maintain the original configuration of",
". import internal class SubmitTarget(Enum): # The target where the",
"== PathType.AUTO: if platform.system() == \"Windows\": path_type = PathType.WINDOWS elif",
"submit_config.run_desc) docker_valid_name_regex = \"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$\" if not re.match(docker_valid_name_regex, submit_config.task_name): raise RuntimeError(\"Invalid",
"\" + docker_valid_name_regex + \", got \" + submit_config.task_name) #",
"raise RuntimeError(\"Unknown platform\") def get_template_from_path(path: str) -> str: # Convert",
"SubmitTarget.LOCAL: farm = internal.local.Target() assert farm is not None #",
"run_dir # Args: # *paths: Path components to be passed",
"import dnnlib dnnlib.submit_config = submit_config exit_with_errcode = False try: print(\"dnnlib:",
"except: return \"unknown\" else: raise RuntimeError(\"Unknown platform\") def make_run_dir_path(*paths): #",
"run. Will be used in the run dir and task",
"to os.path.join # Returns: # A file/dirname rooted at submit_config.run_dir.",
"= 3 class PlatformExtras: # A mixed bag of values",
"False, resume: bool = False, load_config: bool = False, **run_func_kwargs)",
"value\") if submit_config.user_name is None: submit_config.user_name = get_user_name() submit_config.run_func_name =",
"= re.compile(\"^\\\\d+\") # match one or more digits at the",
"inspect import os import pathlib import pickle import platform import"
] |
[
"the top side of the container. :param grid_bottom: Distance between",
"the container. :param grid_bottom: Distance between grid component and the",
"grid_width: Width of grid component. Adaptive by default. :param grid_height:",
"the container. :param grid_left: Distance between grid component and the",
"grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): \"\"\" :param chart: chart instance",
"every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s",
"_series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title def",
"0 for s in self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index,",
"== s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)",
":param series: series data :return: \"\"\" _series, _xaxis, _yaxis, _legend,",
"class Grid(object): def __init__(self): self._chart = None self._js_dependencies = set()",
"self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top, grid_bottom,",
"for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): \"\"\"",
"in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title",
"self._chart is None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies",
"Grid(object): def __init__(self): self._chart = None self._js_dependencies = set() def",
"= grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) if _grid: for",
"else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid",
":param grid_top: Distance between grid component and the top side",
"grid component. Adaptive by default. :param grid_top: Distance between grid",
"component and the top side of the container. :param grid_bottom:",
"component. Adaptive by default. :param grid_top: Distance between grid component",
"self._js_dependencies = chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left,",
"_series = ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0]",
":param grid_left: Distance between grid component and the left side",
"= ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] )",
"only identify for every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index =",
"pprint.pprint(self._chart._option) @property def chart(self): \"\"\" :return: \"\"\" return self._chart def",
"_flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index,",
"left side of the container. :param grid_right: Distance between grid",
"grid_top: Distance between grid component and the top side of",
"component. Adaptive by default. :param grid_height: Height of grid component.",
"None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag",
"and the left side of the container. :param grid_right: Distance",
"self._chart = None self._js_dependencies = set() def add(self, chart, grid_width=None,",
"grid_height: Height of grid component. Adaptive by default. :param grid_top:",
"component and the right side of the container. :return: \"\"\"",
"= self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis is not",
"for s in self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index)",
"of grid component. Adaptive by default. :param grid_height: Height of",
"_flag = s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left,",
"of the container. :return: \"\"\" if self._chart is None: self._chart",
"in self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index",
"None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis,",
"grid_height, grid_top, grid_bottom, grid_left, grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid)",
"pass # indexflag is only identify for every series _flag",
"for every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for",
"between grid component and the top side of the container.",
"not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass #",
"def add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): \"\"\"",
"\"\"\" return self._chart.render_embed() def show_config(self): \"\"\" :return: \"\"\" import pprint",
"_yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is only identify",
":return: \"\"\" _series, _xaxis, _yaxis, _legend, _title = series for",
"( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index,",
":return: \"\"\" return self._chart def _repr_html_(self): \"\"\" :return: \"\"\" return",
"\"\"\" :return: \"\"\" return self._chart def _repr_html_(self): \"\"\" :return: \"\"\"",
"path=\"render.html\"): \"\"\" :param path: :return: \"\"\" self._chart.render(path) def render_embed(self): \"\"\"",
"self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index +=",
"\"\"\" :param chart: chart instance :param grid_width: Width of grid",
"= s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)",
"series: series data :return: \"\"\" _series, _xaxis, _yaxis, _legend, _title",
"\"\"\" :param path: :return: \"\"\" self._chart.render(path) def render_embed(self): \"\"\" :return:",
"of grid component. Adaptive by default. :param grid_top: Distance between",
"None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid =",
"_yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and",
"Distance between grid component and the bottom side of the",
"return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title def render(self, path=\"render.html\"):",
"\"\"\" import pprint return pprint.pprint(self._chart._option) @property def chart(self): \"\"\" :return:",
"chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) if",
"= grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) for _ in",
"container. :param grid_right: Distance between grid component and the right",
"\"\"\" :return: \"\"\" return self._chart.render_embed() def show_config(self): \"\"\" :return: \"\"\"",
"chart: chart instance :param grid_width: Width of grid component. Adaptive",
"yAxisIndex=_series_index) _flag = s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top, grid_bottom,",
"grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) if _grid: for _",
"range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis',",
"_index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if",
"chart instance :param grid_width: Width of grid component. Adaptive by",
"def render(self, path=\"render.html\"): \"\"\" :param path: :return: \"\"\" self._chart.render(path) def",
"and the right side of the container. :return: \"\"\" if",
"\"\"\" _series, _xaxis, _yaxis, _legend, _title = series for s",
"show_config(self): \"\"\" :return: \"\"\" import pprint return pprint.pprint(self._chart._option) @property def",
"and _yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0])",
"and the top side of the container. :param grid_bottom: Distance",
"indexflag is only identify for every series _flag = self._chart._option.get('series')[0].get('indexflag')",
"_xaxis, _yaxis, _legend, _title def render(self, path=\"render.html\"): \"\"\" :param path:",
"\"\"\" self._chart.render(path) def render_embed(self): \"\"\" :return: \"\"\" return self._chart.render_embed() def",
"is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass",
"def show_config(self): \"\"\" :return: \"\"\" import pprint return pprint.pprint(self._chart._option) @property",
"yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag')",
"Adaptive by default. :param grid_height: Height of grid component. Adaptive",
"the bottom side of the container. :param grid_left: Distance between",
"grid_left, grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self,",
"series data :return: \"\"\" _series, _xaxis, _yaxis, _legend, _title =",
"= set() def add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None,",
"chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis, _legned,",
"= chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right)",
"s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top,",
"grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) for _ in range(_index_once):",
"right side of the container. :return: \"\"\" if self._chart is",
"if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1",
"= series for s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series),",
"def render_embed(self): \"\"\" :return: \"\"\" return self._chart.render_embed() def show_config(self): \"\"\"",
"grid component and the left side of the container. :param",
"return pprint.pprint(self._chart._option) @property def chart(self): \"\"\" :return: \"\"\" return self._chart",
"is None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid",
") _index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned)",
"add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): \"\"\" :param",
":param path: :return: \"\"\" self._chart.render(path) def render_embed(self): \"\"\" :return: \"\"\"",
"python # coding=utf-8 from pyecharts.option import grid class Grid(object): def",
"__custom(self, series): \"\"\" :param series: series data :return: \"\"\" _series,",
"pprint return pprint.pprint(self._chart._option) @property def chart(self): \"\"\" :return: \"\"\" return",
"<gh_stars>0 #!/usr/bin/env python # coding=utf-8 from pyecharts.option import grid class",
"\"\"\" :return: \"\"\" import pprint return pprint.pprint(self._chart._option) @property def chart(self):",
":param grid_right: Distance between grid component and the right side",
"chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series)",
":return: \"\"\" if self._chart is None: self._chart = chart self._chart._option.update(grid=[])",
"_series, _xaxis, _yaxis, _legend, _title = series for s in",
"self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is only identify for",
"between grid component and the right side of the container.",
"in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): \"\"\" :param series:",
"Width of grid component. Adaptive by default. :param grid_height: Height",
"grid_bottom, grid_left, grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def",
":return: \"\"\" import pprint return pprint.pprint(self._chart._option) @property def chart(self): \"\"\"",
"grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): \"\"\" :param chart: chart instance :param",
"\"\"\" if self._chart is None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies",
"@property def chart(self): \"\"\" :return: \"\"\" return self._chart def _repr_html_(self):",
"side of the container. :param grid_right: Distance between grid component",
"grid_left=None, grid_right=None): \"\"\" :param chart: chart instance :param grid_width: Width",
"set() def add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None):",
"grid_right: Distance between grid component and the right side of",
"identify for every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0",
"+= 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid = grid(grid_width,",
"if _xaxis and _yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1)",
"grid component. Adaptive by default. :param grid_height: Height of grid",
"self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is only identify for every",
"default. :param grid_top: Distance between grid component and the top",
"# coding=utf-8 from pyecharts.option import grid class Grid(object): def __init__(self):",
"grid_bottom, grid_left, grid_right) if _grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid)",
"Distance between grid component and the top side of the",
"and the bottom side of the container. :param grid_left: Distance",
"self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): \"\"\" :param series: series data",
"\"\"\" :param series: series data :return: \"\"\" _series, _xaxis, _yaxis,",
"range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): \"\"\" :param series: series",
"s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag",
"grid_right=None): \"\"\" :param chart: chart instance :param grid_width: Width of",
"side of the container. :param grid_left: Distance between grid component",
"_xaxis, _yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis",
"_xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is only",
"data :return: \"\"\" _series, _xaxis, _yaxis, _legend, _title = series",
"_legend, _title = series for s in _series: self._chart._option.get('series').append(s) return",
"grid component and the bottom side of the container. :param",
"_ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'), chart._option.get('xAxis',",
"self._js_dependencies = set() def add(self, chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None,",
"the left side of the container. :param grid_right: Distance between",
"_series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid =",
"self._chart._option.get('title').append(_title) if _xaxis and _yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1)",
"render(self, path=\"render.html\"): \"\"\" :param path: :return: \"\"\" self._chart.render(path) def render_embed(self):",
"def __custom(self, series): \"\"\" :param series: series data :return: \"\"\"",
"series for s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis,",
"series): \"\"\" :param series: series data :return: \"\"\" _series, _xaxis,",
"import pprint return pprint.pprint(self._chart._option) @property def chart(self): \"\"\" :return: \"\"\"",
"if _grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series =",
"grid_left: Distance between grid component and the left side of",
"between grid component and the bottom side of the container.",
"in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'), chart._option.get('xAxis', None),",
":param chart: chart instance :param grid_width: Width of grid component.",
"of the container. :param grid_bottom: Distance between grid component and",
"s.get('indexflag') _grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) for",
"grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): \"\"\" :param chart: chart",
"by default. :param grid_height: Height of grid component. Adaptive by",
"pyecharts.option import grid class Grid(object): def __init__(self): self._chart = None",
"except: pass # indexflag is only identify for every series",
"len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title def render(self, path=\"render.html\"): \"\"\"",
"chart(self): \"\"\" :return: \"\"\" return self._chart def _repr_html_(self): \"\"\" :return:",
"the container. :return: \"\"\" if self._chart is None: self._chart =",
"self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis is not None:",
"= self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s in self._chart._option.get('series'): if",
"container. :return: \"\"\" if self._chart is None: self._chart = chart",
"_legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis",
"is only identify for every series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index",
"side of the container. :return: \"\"\" if self._chart is None:",
"render_embed(self): \"\"\" :return: \"\"\" return self._chart.render_embed() def show_config(self): \"\"\" :return:",
"top side of the container. :param grid_bottom: Distance between grid",
"grid_height, grid_top, grid_bottom, grid_left, grid_right) if _grid: for _ in",
"return self._chart.render_embed() def show_config(self): \"\"\" :return: \"\"\" import pprint return",
"1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag = s.get('indexflag') _grid = grid(grid_width, grid_height,",
"_title def render(self, path=\"render.html\"): \"\"\" :param path: :return: \"\"\" self._chart.render(path)",
"__init__(self): self._chart = None self._js_dependencies = set() def add(self, chart,",
"None self._js_dependencies = set() def add(self, chart, grid_width=None, grid_height=None, grid_top=None,",
"_index, _index_once, _xaxis, _yaxis, _legned, _title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title)",
"self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s in self._chart._option.get('series'): if _flag",
":param grid_bottom: Distance between grid component and the bottom side",
"Distance between grid component and the left side of the",
"grid_right) if _grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series",
":param grid_height: Height of grid component. Adaptive by default. :param",
"_xaxis, _yaxis, _legend, _title = series for s in _series:",
"self._chart.render_embed() def show_config(self): \"\"\" :return: \"\"\" import pprint return pprint.pprint(self._chart._option)",
"grid_left, grid_right) if _grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else:",
"_yaxis, _legend, _title = series for s in _series: self._chart._option.get('series').append(s)",
"_legend, _title def render(self, path=\"render.html\"): \"\"\" :param path: :return: \"\"\"",
"def chart(self): \"\"\" :return: \"\"\" return self._chart def _repr_html_(self): \"\"\"",
"from pyecharts.option import grid class Grid(object): def __init__(self): self._chart =",
"_yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except:",
"Adaptive by default. :param grid_top: Distance between grid component and",
"component and the left side of the container. :param grid_right:",
"the container. :param grid_right: Distance between grid component and the",
"grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series):",
"self._chart.render(path) def render_embed(self): \"\"\" :return: \"\"\" return self._chart.render_embed() def show_config(self):",
"# indexflag is only identify for every series _flag =",
"chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once,",
"grid component and the top side of the container. :param",
"_yaxis, _legend, _title def render(self, path=\"render.html\"): \"\"\" :param path: :return:",
"#!/usr/bin/env python # coding=utf-8 from pyecharts.option import grid class Grid(object):",
"by default. :param grid_top: Distance between grid component and the",
"else: _series = ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0],",
"coding=utf-8 from pyecharts.option import grid class Grid(object): def __init__(self): self._chart",
"grid_bottom=None, grid_left=None, grid_right=None): \"\"\" :param chart: chart instance :param grid_width:",
"container. :param grid_bottom: Distance between grid component and the bottom",
"series _flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s in",
"chart, grid_width=None, grid_height=None, grid_top=None, grid_bottom=None, grid_left=None, grid_right=None): \"\"\" :param chart:",
"_series_index = 0 for s in self._chart._option.get('series'): if _flag ==",
"component and the bottom side of the container. :param grid_left:",
"for s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis,",
"= 0 for s in self._chart._option.get('series'): if _flag == s.get('indexflag'):",
"grid class Grid(object): def __init__(self): self._chart = None self._js_dependencies =",
"self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis is not None: try:",
"_xaxis and _yaxis is not None: try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0])",
"bottom side of the container. :param grid_left: Distance between grid",
"self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid = grid(grid_width,",
"_ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): \"\"\" :param",
"grid_top, grid_bottom, grid_left, grid_right) for _ in range(_index_once): self._chart._option.get('grid').append(_grid) self._js_dependencies.union(chart._js_dependencies)",
"s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend,",
"try: _xaxis[0].update(gridIndex=_index-1) _yaxis[0].update(gridIndex=_index-1) self._chart._option.get('xAxis').append(_xaxis[0]) self._chart._option.get('yAxis').append(_yaxis[0]) except: pass # indexflag is",
"_grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) if _grid:",
"grid component and the right side of the container. :return:",
"grid_bottom: Distance between grid component and the bottom side of",
"of the container. :param grid_left: Distance between grid component and",
":param grid_width: Width of grid component. Adaptive by default. :param",
"s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else: _series_index += 1 s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) _flag =",
"_title = series for s in _series: self._chart._option.get('series').append(s) return len(self._chart._option.get('series')),",
"len(_series), _xaxis, _yaxis, _legend, _title def render(self, path=\"render.html\"): \"\"\" :param",
"the right side of the container. :return: \"\"\" if self._chart",
"None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis, _legned, _title",
"_title = self.__custom(_series) self._chart._option.get('legend').append(_legned) self._chart._option.get('title').append(_title) if _xaxis and _yaxis is",
"instance :param grid_width: Width of grid component. Adaptive by default.",
"= None self._js_dependencies = set() def add(self, chart, grid_width=None, grid_height=None,",
"if self._chart is None: self._chart = chart self._chart._option.update(grid=[]) self._js_dependencies =",
"self._chart._option.get('series').append(s) return len(self._chart._option.get('series')), len(_series), _xaxis, _yaxis, _legend, _title def render(self,",
"= chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid = grid(grid_width, grid_height,",
"path: :return: \"\"\" self._chart.render(path) def render_embed(self): \"\"\" :return: \"\"\" return",
"container. :param grid_left: Distance between grid component and the left",
"self._js_dependencies.union(chart._js_dependencies) def __custom(self, series): \"\"\" :param series: series data :return:",
"s in self._chart._option.get('series'): if _flag == s.get('indexflag'): s.update(xAxisIndex=_series_index, yAxisIndex=_series_index) else:",
"self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'), chart._option.get('xAxis', None), chart._option.get('yAxis', None),",
"_flag = self._chart._option.get('series')[0].get('indexflag') _series_index = 0 for s in self._chart._option.get('series'):",
"_grid = grid(grid_width, grid_height, grid_top, grid_bottom, grid_left, grid_right) for _",
"of the container. :param grid_right: Distance between grid component and",
"between grid component and the left side of the container.",
"Height of grid component. Adaptive by default. :param grid_top: Distance",
"Distance between grid component and the right side of the",
":return: \"\"\" self._chart.render(path) def render_embed(self): \"\"\" :return: \"\"\" return self._chart.render_embed()",
"default. :param grid_height: Height of grid component. Adaptive by default.",
"grid_top, grid_bottom, grid_left, grid_right) if _grid: for _ in range(len(self._chart._option.get('series'))):",
"_grid: for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = (",
"def __init__(self): self._chart = None self._js_dependencies = set() def add(self,",
"chart._option.get('xAxis', None), chart._option.get('yAxis', None), chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis,",
"side of the container. :param grid_bottom: Distance between grid component",
"import grid class Grid(object): def __init__(self): self._chart = None self._js_dependencies",
"chart._option.get('legend')[0], chart._option.get('title')[0] ) _index, _index_once, _xaxis, _yaxis, _legned, _title =",
"\"\"\" return self._chart def _repr_html_(self): \"\"\" :return: \"\"\" return self._chart._repr_html_()",
"for _ in range(len(self._chart._option.get('series'))): self._chart._option.get('grid').append(_grid) else: _series = ( chart._option.get('series'),",
":return: \"\"\" return self._chart.render_embed() def show_config(self): \"\"\" :return: \"\"\" import",
"chart self._chart._option.update(grid=[]) self._js_dependencies = chart._js_dependencies _grid = grid(grid_width, grid_height, grid_top,"
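For orientation, a minimal usage sketch of the Grid helper above. It assumes the old pyecharts 0.x chart API (from pyecharts import Bar, Line; chart.add(name, x_values, y_values)); the chart data and the grid offsets are made up for illustration and are not taken from the source.

from pyecharts import Bar, Line

# build two small charts (illustrative data only)
bar = Bar("bar demo")
bar.add("series A", ["a", "b", "c"], [1, 2, 3])
line = Line("line demo")
line.add("series B", ["a", "b", "c"], [3, 2, 1])

# the first add() adopts the chart and creates the shared grid option;
# later add() calls merge series, axes, legend and title via __custom()
layout = Grid()
layout.add(bar, grid_bottom="60%")
layout.add(line, grid_top="60%")
layout.render("grid_demo.html")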
"which the keys are descriptions and values uris\"\"\" if not",
"options): logging.warning(\"request payment called with invalid args user_id={} message={} options={}\"",
"options): logging.warning(\"send postback called with invalid args user_id={} message={} options={}\"",
"message, sent_by_maker=True): if not valid_args(user_id, message): logging.warning(\"send message called with",
"with invalid args user_id={} message={}\".format(user_id, message)) return logging.debug(\"Sending message: user_id={0}",
"options)) return role = \"appMaker\" buttons = [] for short_text,",
"message={} options={}\" .format(user_id, message, options)) return role = \"appMaker\" buttons",
"role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id, message,",
"[] for short_text, result in options: buttons.append({ \"type\": \"link\", \"text\":",
"message called with invalid args user_id={} message={}\".format(user_id, message)) return logging.debug(\"Sending",
"\"appMaker\" buttons = [] for text, kind, result in options:",
"the keys are descriptions and values uris\"\"\" if not valid_args(user_id,",
"and third the result for the specified type.\"\"\" if not",
"descriptions and values uris\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send",
"list of tuples in which the first element is the",
"which specifies the amount of cents in the transaction Smooch",
"webhook to listen for the postback.\"\"\" if not valid_args(user_id, message,",
"def get_conversation(user_id): if not user_id: logging.warning(\"get conversation called with invalid",
"need to set up a webhook to listen for the",
"{}, 'get') def request_payment(user_id, message, options): \"\"\"Note that amount is",
"message, options): \"\"\"Options is a list of tuples in which",
"links. The options field is a dictionary in which the",
"values the postback payload. You need to set up a",
"short_text, \"uri\": result}) data = {\"text\": message, \"role\": role, \"actions\":",
"\"amount\": result}) data = {\"text\": message, \"role\": role, \"actions\": buttons}",
"in options: buttons.append({ \"type\": \"link\", \"text\": short_text, \"uri\": result}) data",
"return role = \"appMaker\" buttons = [] for short_text, result",
"short_text, result in options: buttons.append({ \"type\": \"link\", \"text\": short_text, \"uri\":",
"in which the first element is the type of the",
"the result for the specified type.\"\"\" if not valid_args(user_id, message,",
"on your webhook. The options field is a dictionary in",
"listen for on your webhook. The options field is a",
"your webhook. The options field is a dictionary in which",
"from .endpoint import ask def send_message(user_id, message, sent_by_maker=True): if not",
"data, 'post') def send_links(user_id, message, options): \"\"\"Sends a series of",
"is a list of tuples in which the first element",
"return logging.debug(\"Get conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id,",
"role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id, message,",
"buttons called with invalid args user_id={} message={} options={}\" .format(user_id, message,",
"not valid_args(user_id, message, options): logging.warning(\"request payment called with invalid args",
"result in options: buttons.append({ \"type\": \"buy\", \"text\": short_text, \"amount\": result})",
"kind, \"text\": text, \"payload\": result }) data = {\"text\": message,",
"if options is not None: if user_id and message and",
"message, options): logging.warning(\"send postback called with invalid args user_id={} message={}",
"settings.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"request payment called with",
"args user_id={} message={}\".format(user_id, message)) return logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id,",
"links called with invalid args user_id={} message={} options={}\" .format(user_id, message,",
"sent_by_maker)) role = \"appMaker\" if not sent_by_maker: role = \"appUser\"",
"type of the button, second the short text, and third",
"\"link\", \"text\": short_text, \"uri\": result}) data = {\"text\": message, \"role\":",
"message, options=None): if options is not None: if user_id and",
"amount is a integer which specifies the amount of cents",
"specifies the amount of cents in the transaction Smooch will",
"data, 'post') def valid_args(user_id, message, options=None): if options is not",
"postback payload. You need to set up a webhook to",
"logging.debug(\"Get conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id, message,",
"is list: return True return False else: if user_id and",
"if not valid_args(user_id, message, options): logging.warning(\"send buttons called with invalid",
"= [] for short_text, result in options: buttons.append({ \"type\": \"buy\",",
"dictionary in which the keys are descriptions and values uris\"\"\"",
"get_conversation(user_id): if not user_id: logging.warning(\"get conversation called with invalid arg",
"return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id, message, options): \"\"\"Options is",
"logging.warning(\"request payment called with invalid args user_id={} message={} options={}\" .format(user_id,",
"return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id, message, options=None): if options",
"and values uris\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send links",
"options): logging.warning(\"send buttons called with invalid args user_id={} message={} options={}\"",
"buttons = [] for short_text, result in options: buttons.append({ \"type\":",
"options): logging.warning(\"send links called with invalid args user_id={} message={} options={}\"",
"The options field is a dictionary in which the keys",
"is a integer which specifies the amount of cents in",
"return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id, message, options): \"\"\"Sends a",
"message, options): logging.warning(\"send buttons called with invalid args user_id={} message={}",
"= [] for short_text, result in options: buttons.append({ \"type\": \"postback\",",
"data, 'post') def get_conversation(user_id): if not user_id: logging.warning(\"get conversation called",
"will default to the currency specified in your account settings.\"\"\"",
"of cents in the transaction Smooch will default to the",
"= [] for text, kind, result in options: buttons.append({ \"type\":",
"buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id, message, options=None): if",
"\"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id,",
"not sent_by_maker: role = \"appUser\" data = {\"text\": message, \"role\":",
"invalid args user_id={} message={} options={}\" .format(user_id, message, options)) return role",
"role = \"appMaker\" buttons = [] for text, kind, result",
"ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id): if not user_id: logging.warning(\"get conversation",
"for on your webhook. The options field is a dictionary",
"options: buttons.append({ \"type\": kind, \"text\": text, \"payload\": result }) data",
"invalid args user_id={} message={}\".format(user_id, message)) return logging.debug(\"Sending message: user_id={0} message={1}",
"options that you can listen for on your webhook. The",
"a integer which specifies the amount of cents in the",
"a webhook to listen for the postback.\"\"\" if not valid_args(user_id,",
"if not valid_args(user_id, message): logging.warning(\"send message called with invalid args",
"return role = \"appMaker\" buttons = [] for text, kind,",
"user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id, message, options): \"\"\"Note",
"message, \"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def",
"element is the type of the button, second the short",
"descriptions and values the postback payload. You need to set",
"'post') def valid_args(user_id, message, options=None): if options is not None:",
"the button, second the short text, and third the result",
"with invalid args user_id={} message={} options={}\" .format(user_id, message, options)) return",
"\"\"\"Options is a list of tuples in which the first",
"= {\"text\": message, \"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def",
"up a webhook to listen for the postback.\"\"\" if not",
"a series of links. The options field is a dictionary",
"= \"appMaker\" buttons = [] for short_text, result in options:",
"specified in your account settings.\"\"\" if not valid_args(user_id, message, options):",
"ask def send_message(user_id, message, sent_by_maker=True): if not valid_args(user_id, message): logging.warning(\"send",
"of links. The options field is a dictionary in which",
"You need to set up a webhook to listen for",
"ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id, message, options): \"\"\"Note that amount",
"options: buttons.append({ \"type\": \"link\", \"text\": short_text, \"uri\": result}) data =",
"for text, kind, result in options: buttons.append({ \"type\": kind, \"text\":",
"that you can listen for on your webhook. The options",
"not valid_args(user_id, message, options): logging.warning(\"send buttons called with invalid args",
"and type(options) is list: return True return False else: if",
"= \"appMaker\" buttons = [] for text, kind, result in",
"\"text\": short_text, \"amount\": result}) data = {\"text\": message, \"role\": role,",
"valid_args(user_id, message, options): logging.warning(\"send buttons called with invalid args user_id={}",
"kind, result in options: buttons.append({ \"type\": kind, \"text\": text, \"payload\":",
"short text, and third the result for the specified type.\"\"\"",
"return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id, message, options): \"\"\"Note that",
"the short text, and third the result for the specified",
"ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id, message, options): \"\"\"Sends a series",
"def send_postbacks(user_id, message, options): \"\"\"Sends a series of options that",
"integer which specifies the amount of cents in the transaction",
"options): \"\"\"Sends a series of links. The options field is",
"if not user_id: logging.warning(\"get conversation called with invalid arg user_id={}\".format(user_id))",
"\"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id, message, options):",
"options is not None: if user_id and message and options",
"def valid_args(user_id, message, options=None): if options is not None: if",
"options={}\" .format(user_id, message, options)) return role = \"appMaker\" buttons =",
"if not sent_by_maker: role = \"appUser\" data = {\"text\": message,",
"\"type\": kind, \"text\": text, \"payload\": result }) data = {\"text\":",
"called with invalid arg user_id={}\".format(user_id)) return logging.debug(\"Get conversation: user_id={}\".format(user_id)) return",
"amount of cents in the transaction Smooch will default to",
"options): \"\"\"Options is a list of tuples in which the",
"message, sent_by_maker)) role = \"appMaker\" if not sent_by_maker: role =",
"message and options and type(options) is list: return True return",
"import logging from .endpoint import ask def send_message(user_id, message, sent_by_maker=True):",
"message, options)) return role = \"appMaker\" buttons = [] for",
"data, 'post') def send_postbacks(user_id, message, options): \"\"\"Sends a series of",
"'get') def request_payment(user_id, message, options): \"\"\"Note that amount is a",
"user_id: logging.warning(\"get conversation called with invalid arg user_id={}\".format(user_id)) return logging.debug(\"Get",
"send_buttons(user_id, message, options): \"\"\"Options is a list of tuples in",
"not valid_args(user_id, message): logging.warning(\"send message called with invalid args user_id={}",
"first element is the type of the button, second the",
"user_id={} message={} options={}\" .format(user_id, message, options)) return role = \"appMaker\"",
"text, kind, result in options: buttons.append({ \"type\": kind, \"text\": text,",
"series of links. The options field is a dictionary in",
"logging from .endpoint import ask def send_message(user_id, message, sent_by_maker=True): if",
"currency specified in your account settings.\"\"\" if not valid_args(user_id, message,",
"a series of options that you can listen for on",
"can listen for on your webhook. The options field is",
"webhook. The options field is a dictionary in which the",
"\"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id): if not",
"for short_text, result in options: buttons.append({ \"type\": \"postback\", \"text\": short_text,",
"message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role = \"appMaker\" if",
"result in options: buttons.append({ \"type\": \"link\", \"text\": short_text, \"uri\": result})",
"valid_args(user_id, message): logging.warning(\"send message called with invalid args user_id={} message={}\".format(user_id,",
"account settings.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"request payment called",
"True return False else: if user_id and message: return True",
"\"text\": short_text, \"uri\": result}) data = {\"text\": message, \"role\": role,",
"the specified type.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send buttons",
"that amount is a integer which specifies the amount of",
"}) data = {\"text\": message, \"role\": role, \"actions\": buttons} return",
"the postback.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send postback called",
"postback called with invalid args user_id={} message={} options={}\" .format(user_id, message,",
"data = {\"text\": message, \"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')",
"user_id={} message={}\".format(user_id, message)) return logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message,",
"in your account settings.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"request",
"'post') def send_postbacks(user_id, message, options): \"\"\"Sends a series of options",
"message, options): \"\"\"Sends a series of options that you can",
"\"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id,",
"is the type of the button, second the short text,",
"buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id, message, options): \"\"\"Sends",
"user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role = \"appMaker\" if not",
"message={}\".format(user_id, message)) return logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker))",
"series of options that you can listen for on your",
"return logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role =",
"\"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id, message, options=None):",
"of options that you can listen for on your webhook.",
"in options: buttons.append({ \"type\": \"buy\", \"text\": short_text, \"amount\": result}) data",
"message, options): logging.warning(\"request payment called with invalid args user_id={} message={}",
"second the short text, and third the result for the",
"return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id): if not user_id: logging.warning(\"get",
"of the button, second the short text, and third the",
"buttons.append({ \"type\": kind, \"text\": text, \"payload\": result }) data =",
"which the first element is the type of the button,",
"{\"text\": message, \"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id):",
"postback.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send postback called with",
"for the postback.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send postback",
"sent_by_maker: role = \"appUser\" data = {\"text\": message, \"role\": role}",
"message): logging.warning(\"send message called with invalid args user_id={} message={}\".format(user_id, message))",
"not None: if user_id and message and options and type(options)",
"are descriptions and values the postback payload. You need to",
"role = \"appUser\" data = {\"text\": message, \"role\": role} return",
"keys are descriptions and values the postback payload. You need",
"and options and type(options) is list: return True return False",
"user_id and message and options and type(options) is list: return",
"which the keys are descriptions and values the postback payload.",
"[] for short_text, result in options: buttons.append({ \"type\": \"buy\", \"text\":",
"conversation called with invalid arg user_id={}\".format(user_id)) return logging.debug(\"Get conversation: user_id={}\".format(user_id))",
"not valid_args(user_id, message, options): logging.warning(\"send links called with invalid args",
"and values the postback payload. You need to set up",
"options and type(options) is list: return True return False else:",
"with invalid arg user_id={}\".format(user_id)) return logging.debug(\"Get conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id),",
"return False else: if user_id and message: return True return",
"the first element is the type of the button, second",
"valid_args(user_id, message, options): logging.warning(\"request payment called with invalid args user_id={}",
"if not valid_args(user_id, message, options): logging.warning(\"request payment called with invalid",
"options: buttons.append({ \"type\": \"buy\", \"text\": short_text, \"amount\": result}) data =",
"buttons.append({ \"type\": \"link\", \"text\": short_text, \"uri\": result}) data = {\"text\":",
"for short_text, result in options: buttons.append({ \"type\": \"link\", \"text\": short_text,",
"buttons = [] for text, kind, result in options: buttons.append({",
"the postback payload. You need to set up a webhook",
"set up a webhook to listen for the postback.\"\"\" if",
"result in options: buttons.append({ \"type\": \"postback\", \"text\": short_text, \"payload\": result",
"\"appMaker\" buttons = [] for short_text, result in options: buttons.append({",
"return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id, message, options): \"\"\"Sends a",
"send_links(user_id, message, options): \"\"\"Sends a series of links. The options",
"result in options: buttons.append({ \"type\": kind, \"text\": text, \"payload\": result",
"data = {\"text\": message, \"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id),",
"\"type\": \"link\", \"text\": short_text, \"uri\": result}) data = {\"text\": message,",
"is a dictionary in which the keys are descriptions and",
"[] for short_text, result in options: buttons.append({ \"type\": \"postback\", \"text\":",
"are descriptions and values uris\"\"\" if not valid_args(user_id, message, options):",
"= \"appUser\" data = {\"text\": message, \"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id),",
"in options: buttons.append({ \"type\": kind, \"text\": text, \"payload\": result })",
"valid_args(user_id, message, options): logging.warning(\"send links called with invalid args user_id={}",
"the currency specified in your account settings.\"\"\" if not valid_args(user_id,",
"logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role = \"appMaker\"",
"= \"appMaker\" if not sent_by_maker: role = \"appUser\" data =",
"a list of tuples in which the first element is",
"message, \"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id): if",
"logging.warning(\"send message called with invalid args user_id={} message={}\".format(user_id, message)) return",
"short_text, result in options: buttons.append({ \"type\": \"buy\", \"text\": short_text, \"amount\":",
"text, \"payload\": result }) data = {\"text\": message, \"role\": role,",
"to listen for the postback.\"\"\" if not valid_args(user_id, message, options):",
"short_text, \"payload\": result }) data = {\"text\": message, \"role\": role,",
"send_message(user_id, message, sent_by_maker=True): if not valid_args(user_id, message): logging.warning(\"send message called",
"\"type\": \"postback\", \"text\": short_text, \"payload\": result }) data = {\"text\":",
"tuples in which the first element is the type of",
"type(options) is list: return True return False else: if user_id",
"type.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send buttons called with",
"\"postback\", \"text\": short_text, \"payload\": result }) data = {\"text\": message,",
"payload. You need to set up a webhook to listen",
"is not None: if user_id and message and options and",
"\"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id, message, options):",
"not valid_args(user_id, message, options): logging.warning(\"send postback called with invalid args",
"\"buy\", \"text\": short_text, \"amount\": result}) data = {\"text\": message, \"role\":",
"role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id, message,",
"the amount of cents in the transaction Smooch will default",
"transaction Smooch will default to the currency specified in your",
"called with invalid args user_id={} message={} options={}\" .format(user_id, message, options))",
"def send_message(user_id, message, sent_by_maker=True): if not valid_args(user_id, message): logging.warning(\"send message",
"\"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id,",
".endpoint import ask def send_message(user_id, message, sent_by_maker=True): if not valid_args(user_id,",
"invalid arg user_id={}\".format(user_id)) return logging.debug(\"Get conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {},",
"options=None): if options is not None: if user_id and message",
"button, second the short text, and third the result for",
"if user_id and message and options and type(options) is list:",
"listen for the postback.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send",
"in which the keys are descriptions and values the postback",
"message, options): \"\"\"Note that amount is a integer which specifies",
"not user_id: logging.warning(\"get conversation called with invalid arg user_id={}\".format(user_id)) return",
"= {\"text\": message, \"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data,",
"options)) return role = \"appMaker\" buttons = [] for text,",
"None: if user_id and message and options and type(options) is",
"\"\"\"Sends a series of options that you can listen for",
"values uris\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send links called",
"\"text\": text, \"payload\": result }) data = {\"text\": message, \"role\":",
"logging.warning(\"send buttons called with invalid args user_id={} message={} options={}\" .format(user_id,",
"options): \"\"\"Sends a series of options that you can listen",
"if not valid_args(user_id, message, options): logging.warning(\"send postback called with invalid",
"\"type\": \"buy\", \"text\": short_text, \"amount\": result}) data = {\"text\": message,",
"if not valid_args(user_id, message, options): logging.warning(\"send links called with invalid",
"for short_text, result in options: buttons.append({ \"type\": \"buy\", \"text\": short_text,",
"valid_args(user_id, message, options): logging.warning(\"send postback called with invalid args user_id={}",
"data, 'post') def send_buttons(user_id, message, options): \"\"\"Options is a list",
"sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role = \"appMaker\" if not sent_by_maker: role",
"\"uri\": result}) data = {\"text\": message, \"role\": role, \"actions\": buttons}",
".format(user_id, message, options)) return role = \"appMaker\" buttons = []",
"[] for text, kind, result in options: buttons.append({ \"type\": kind,",
"ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def valid_args(user_id, message, options=None): if options is",
"valid_args(user_id, message, options=None): if options is not None: if user_id",
"\"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id, message, options):",
"message)) return logging.debug(\"Sending message: user_id={0} message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role",
"options): \"\"\"Note that amount is a integer which specifies the",
"payment called with invalid args user_id={} message={} options={}\" .format(user_id, message,",
"for the specified type.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send",
"def send_buttons(user_id, message, options): \"\"\"Options is a list of tuples",
"third the result for the specified type.\"\"\" if not valid_args(user_id,",
"message, options): \"\"\"Sends a series of links. The options field",
"role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id, message,",
"result for the specified type.\"\"\" if not valid_args(user_id, message, options):",
"'post') def send_buttons(user_id, message, options): \"\"\"Options is a list of",
"Smooch will default to the currency specified in your account",
"the keys are descriptions and values the postback payload. You",
"= [] for short_text, result in options: buttons.append({ \"type\": \"link\",",
"\"\"\"Note that amount is a integer which specifies the amount",
"you can listen for on your webhook. The options field",
"role} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def get_conversation(user_id): if not user_id:",
"False else: if user_id and message: return True return False",
"buttons.append({ \"type\": \"buy\", \"text\": short_text, \"amount\": result}) data = {\"text\":",
"and message and options and type(options) is list: return True",
"ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_postbacks(user_id, message, options): \"\"\"Sends a series",
"of tuples in which the first element is the type",
"\"\"\"Sends a series of links. The options field is a",
"keys are descriptions and values uris\"\"\" if not valid_args(user_id, message,",
"\"payload\": result }) data = {\"text\": message, \"role\": role, \"actions\":",
"def send_links(user_id, message, options): \"\"\"Sends a series of links. The",
"field is a dictionary in which the keys are descriptions",
"logging.warning(\"send links called with invalid args user_id={} message={} options={}\" .format(user_id,",
"def request_payment(user_id, message, options): \"\"\"Note that amount is a integer",
"logging.warning(\"send postback called with invalid args user_id={} message={} options={}\" .format(user_id,",
"role = \"appMaker\" buttons = [] for short_text, result in",
"the type of the button, second the short text, and",
"sent_by_maker=True): if not valid_args(user_id, message): logging.warning(\"send message called with invalid",
"called with invalid args user_id={} message={}\".format(user_id, message)) return logging.debug(\"Sending message:",
"in options: buttons.append({ \"type\": \"postback\", \"text\": short_text, \"payload\": result })",
"default to the currency specified in your account settings.\"\"\" if",
"uris\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send links called with",
"message={1} sent_by_maker={2}\".format(user_id, message, sent_by_maker)) role = \"appMaker\" if not sent_by_maker:",
"return True return False else: if user_id and message: return",
"to the currency specified in your account settings.\"\"\" if not",
"request_payment(user_id, message, options): \"\"\"Note that amount is a integer which",
"'post') def get_conversation(user_id): if not user_id: logging.warning(\"get conversation called with",
"your account settings.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"request payment",
"buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id, message, options): \"\"\"Options",
"to set up a webhook to listen for the postback.\"\"\"",
"{\"text\": message, \"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post')",
"dictionary in which the keys are descriptions and values the",
"short_text, result in options: buttons.append({ \"type\": \"postback\", \"text\": short_text, \"payload\":",
"\"text\": short_text, \"payload\": result }) data = {\"text\": message, \"role\":",
"result }) data = {\"text\": message, \"role\": role, \"actions\": buttons}",
"user_id={}\".format(user_id)) return logging.debug(\"Get conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def",
"role = \"appMaker\" if not sent_by_maker: role = \"appUser\" data",
"message, options): logging.warning(\"send links called with invalid args user_id={} message={}",
"text, and third the result for the specified type.\"\"\" if",
"\"appMaker\" if not sent_by_maker: role = \"appUser\" data = {\"text\":",
"buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_links(user_id, message, options): \"\"\"Sends",
"arg user_id={}\".format(user_id)) return logging.debug(\"Get conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get')",
"send_postbacks(user_id, message, options): \"\"\"Sends a series of options that you",
"args user_id={} message={} options={}\" .format(user_id, message, options)) return role =",
"specified type.\"\"\" if not valid_args(user_id, message, options): logging.warning(\"send buttons called",
"in which the keys are descriptions and values uris\"\"\" if",
"logging.warning(\"get conversation called with invalid arg user_id={}\".format(user_id)) return logging.debug(\"Get conversation:",
"import ask def send_message(user_id, message, sent_by_maker=True): if not valid_args(user_id, message):",
"options field is a dictionary in which the keys are",
"in the transaction Smooch will default to the currency specified",
"\"appUser\" data = {\"text\": message, \"role\": role} return ask('appusers/{0}/conversation/messages'.format(user_id), data,",
"'post') def send_links(user_id, message, options): \"\"\"Sends a series of links.",
"options: buttons.append({ \"type\": \"postback\", \"text\": short_text, \"payload\": result }) data",
"the transaction Smooch will default to the currency specified in",
"short_text, \"amount\": result}) data = {\"text\": message, \"role\": role, \"actions\":",
"cents in the transaction Smooch will default to the currency",
"list: return True return False else: if user_id and message:",
"a dictionary in which the keys are descriptions and values",
"buttons.append({ \"type\": \"postback\", \"text\": short_text, \"payload\": result }) data =",
"\"role\": role, \"actions\": buttons} return ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id,",
"ask('appusers/{0}/conversation/messages'.format(user_id), data, 'post') def send_buttons(user_id, message, options): \"\"\"Options is a",
"conversation: user_id={}\".format(user_id)) return ask('appusers/{0}/conversation'.format(user_id), {}, 'get') def request_payment(user_id, message, options):",
"result}) data = {\"text\": message, \"role\": role, \"actions\": buttons} return"
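A short, hypothetical usage sketch of the conversation helpers above. The app user id and every payload are placeholders, and .endpoint.ask must already be configured with valid Smooch credentials. One caveat grounded in the code itself: although the send_links docstring describes options as a dictionary, the loop unpacks (text, value) pairs, so a list of tuples (or dict.items()) is what the functions actually accept.

# plain text message sent as the app maker
send_message("app-user-123", "Hello from the bot")

# links: (description, uri) pairs
send_links("app-user-123", "Useful links", [("Docs", "https://docs.smooch.io")])

# postbacks: (description, payload) pairs; the payload arrives on your webhook
send_postbacks("app-user-123", "Pick one", [("Yes", "YES_PAYLOAD"), ("No", "NO_PAYLOAD")])

# generic buttons: (text, kind, result) triples
send_buttons("app-user-123", "Checkout", [("Pay $10", "buy", 1000)])

# payment request: (text, amount-in-cents) pairs
request_payment("app-user-123", "Order #42", [("Pay now", 1000)])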
"numpy as np import zipfile print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r')",
"calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\") calc(\"bs.jpg\") calc(\"plapper.jpg\") calc(\"ds.jpg\") print(\"Complete\")",
"matplotlib.pylab as plt import numpy as np import zipfile print(\"Extract\")",
"test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred]))",
"plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\")",
"CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image)",
"import Image import matplotlib.pylab as plt import numpy as np",
"np import zipfile print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close()",
"Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255",
"plt import numpy as np import zipfile print(\"Extract\") zip_ref =",
"zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"]",
"as plt import numpy as np import zipfile print(\"Extract\") zip_ref",
"test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START",
"{}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\")",
"<reponame>Sharkbyteprojects/IRIS-ML_and_Deep-Learning<gh_stars>0 import keras from keras.models import load_model from PIL import",
"zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname):",
"print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\")",
"plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\")",
"from keras.models import load_model from PIL import Image import matplotlib.pylab",
"zipfile print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\")",
"'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image",
"PIL import Image import matplotlib.pylab as plt import numpy as",
"calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete:",
"print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\")",
"print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\") calc(\"bs.jpg\")",
"test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions)",
"load_model from PIL import Image import matplotlib.pylab as plt import",
"import matplotlib.pylab as plt import numpy as np import zipfile",
"predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\")",
"calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\") calc(\"bs.jpg\") calc(\"plapper.jpg\") calc(\"ds.jpg\") print(\"Complete\") print(\"End\")",
"zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS)",
"keras.models import load_model from PIL import Image import matplotlib.pylab as",
"= zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def",
"plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\")",
"=Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3))",
"test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START TEST\")",
"from PIL import Image import matplotlib.pylab as plt import numpy",
"keras from keras.models import load_model from PIL import Image import",
"import keras from keras.models import load_model from PIL import Image",
"print(predictions) plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\")",
"TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\") calc(\"bs.jpg\") calc(\"plapper.jpg\")",
"test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show()",
"model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3)",
"Image import matplotlib.pylab as plt import numpy as np import",
"def calc(imname): test_image =Image.open(\"asset/\"+imname) test_image=test_image.resize((32,32),Image.ANTIALIAS) test_image=np.array(test_image,dtype=\"float32\") test_image/=255 test_image=test_image.reshape(-1,32,32,3) predictions=model.predict(test_image) index_max_pred=np.argmax(predictions)",
"import zipfile print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\") zip_ref.close() print(\"Load",
"import numpy as np import zipfile print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\",",
"calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\") calc(\"bs.jpg\") calc(\"plapper.jpg\") calc(\"ds.jpg\") print(\"Complete\") print(\"End\") quit(0)",
"zip_ref.extractall(\".\") zip_ref.close() print(\"Load Model\") model=load_model(\"cifar-model.h5\") CIFAR_10_CLASSES=[\"Plane\",\"Car\",\"bird\",\"cat\",\"deer\",\"dog\",\"frog\",\"horse\",\"ship\",\"truck\"] def calc(imname): test_image =Image.open(\"asset/\"+imname)",
"calc(\"lkw-image.jpg\") calc(\"cat.jpg\") calc(\"frog.jpg\") calc(\"fog.jpg\") calc(\"lfog.jpg\") calc(\"d.jpg\") calc(\"b.jpg\") calc(\"bs.jpg\") calc(\"plapper.jpg\") calc(\"ds.jpg\")",
"as np import zipfile print(\"Extract\") zip_ref = zipfile.ZipFile(\"./asset.zip\", 'r') zip_ref.extractall(\".\")",
"index_max_pred=np.argmax(predictions) plt.title(\"Complete: {}\".format(CIFAR_10_CLASSES[index_max_pred])) plt.imshow(test_image[0].reshape(32,32,3)) print(predictions) plt.show() print(\"START TEST\") calc(\"lkw-image.jpg\") calc(\"cat.jpg\")",
"import load_model from PIL import Image import matplotlib.pylab as plt"
] |
[
"from django.urls import path, include from users import views as",
"path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if",
"admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html')",
"import include, path 2. Add a URL to urlpatterns: path('blog/',",
"path('', Home.as_view(), name='home') Including another URLconf 1. Import the include()",
"2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including",
"include() function: from django.urls import include, path 2. Add a",
"import views as auth_views from upload import views as upload_views",
"Add an import: from my_app import views 2. Add a",
"URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1.",
"import static urlpatterns = [ # Uncomment the next line",
"the next line to enable the admin: path('admin/', admin.site.urls), path('',",
"Add a URL to urlpatterns: path('', views.home, name='home') Class-based views",
"to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import",
"from django.conf.urls.static import static urlpatterns = [ # Uncomment the",
"1. Add an import: from other_app.views import Home 2. Add",
"lines to enable admin: from django.contrib import admin from django.urls",
"settings from django.conf.urls.static import static urlpatterns = [ # Uncomment",
"an import: from other_app.views import Home 2. Add a URL",
"to enable the admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register,",
"next two lines to enable admin: from django.contrib import admin",
"as auth_views from upload import views as upload_views from django.conf",
"Uncomment next two lines to enable admin: from django.contrib import",
"Add a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" # Uncomment",
"Home.as_view(), name='home') Including another URLconf 1. Import the include() function:",
"information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an",
"name='home') Class-based views 1. Add an import: from other_app.views import",
"function: from django.urls import include, path 2. Add a URL",
"import views 2. Add a URL to urlpatterns: path('', views.home,",
"views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views",
"Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')",
"The `urlpatterns` list routes URLs to views. For more information",
"Add an import: from other_app.views import Home 2. Add a",
"import Home 2. Add a URL to urlpatterns: path('', Home.as_view(),",
"static urlpatterns = [ # Uncomment the next line to",
",name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG: urlpatterns",
"URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples:",
"views 1. Add an import: from other_app.views import Home 2.",
"path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'),",
"include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))",
"# Uncomment the next line to enable the admin: path('admin/',",
"path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG:",
"urlpatterns: path('blog/', include('blog.urls')) \"\"\" # Uncomment next two lines to",
"other_app.views import Home 2. Add a URL to urlpatterns: path('',",
"list routes URLs to views. For more information please see:",
"Class-based views 1. Add an import: from other_app.views import Home",
"Examples: Function views 1. Add an import: from my_app import",
"the include() function: from django.urls import include, path 2. Add",
"path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'),",
"enable the admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'),",
"more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add",
"as user_views from django.contrib.auth import views as auth_views from upload",
"as upload_views from django.conf import settings from django.conf.urls.static import static",
"include('blog.urls')) \"\"\" # Uncomment next two lines to enable admin:",
"django.conf import settings from django.conf.urls.static import static urlpatterns = [",
"path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\"",
"to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function",
"include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'),",
"views as auth_views from upload import views as upload_views from",
"\"\"\" tt URL Configuration The `urlpatterns` list routes URLs to",
"to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an",
"path('blog/', include('blog.urls')) \"\"\" # Uncomment next two lines to enable",
"views as user_views from django.contrib.auth import views as auth_views from",
"import: from other_app.views import Home 2. Add a URL to",
"1. Add an import: from my_app import views 2. Add",
"admin: from django.contrib import admin from django.urls import path, include",
"tt URL Configuration The `urlpatterns` list routes URLs to views.",
"[ # Uncomment the next line to enable the admin:",
"line to enable the admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/',",
"from my_app import views 2. Add a URL to urlpatterns:",
"from users import views as user_views from django.contrib.auth import views",
"urlpatterns = [ # Uncomment the next line to enable",
"name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ]",
"import views as user_views from django.contrib.auth import views as auth_views",
"import: from my_app import views 2. Add a URL to",
"upload_views from django.conf import settings from django.conf.urls.static import static urlpatterns",
"Import the include() function: from django.urls import include, path 2.",
"For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1.",
"URL Configuration The `urlpatterns` list routes URLs to views. For",
"import settings from django.conf.urls.static import static urlpatterns = [ #",
"from django.conf import settings from django.conf.urls.static import static urlpatterns =",
"1. Import the include() function: from django.urls import include, path",
"auth_views from upload import views as upload_views from django.conf import",
"another URLconf 1. Import the include() function: from django.urls import",
"views 2. Add a URL to urlpatterns: path('', views.home, name='home')",
"enable admin: from django.contrib import admin from django.urls import path,",
"from django.urls import include, path 2. Add a URL to",
"my_app import views 2. Add a URL to urlpatterns: path('',",
"import views as upload_views from django.conf import settings from django.conf.urls.static",
"urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import:",
"to enable admin: from django.contrib import admin from django.urls import",
"views as upload_views from django.conf import settings from django.conf.urls.static import",
"a URL to urlpatterns: path('', views.home, name='home') Class-based views 1.",
"django.urls import include, path 2. Add a URL to urlpatterns:",
"name='home') Including another URLconf 1. Import the include() function: from",
"from other_app.views import Home 2. Add a URL to urlpatterns:",
"path('', views.home, name='home') Class-based views 1. Add an import: from",
"= [ # Uncomment the next line to enable the",
"a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" # Uncomment next",
"upload import views as upload_views from django.conf import settings from",
"# Uncomment next two lines to enable admin: from django.contrib",
"URLconf 1. Import the include() function: from django.urls import include,",
"from django.contrib.auth import views as auth_views from upload import views",
"Configuration The `urlpatterns` list routes URLs to views. For more",
"URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" # Uncomment next two",
"user_views from django.contrib.auth import views as auth_views from upload import",
"from django.contrib import admin from django.urls import path, include from",
"URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add",
"two lines to enable admin: from django.contrib import admin from",
"views 1. Add an import: from my_app import views 2.",
"Including another URLconf 1. Import the include() function: from django.urls",
"import path, include from users import views as user_views from",
"user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'),",
"Uncomment the next line to enable the admin: path('admin/', admin.site.urls),",
"Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another",
"see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from",
"import admin from django.urls import path, include from users import",
"include from users import views as user_views from django.contrib.auth import",
"django.conf.urls.static import static urlpatterns = [ # Uncomment the next",
"2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based",
"Function views 1. Add an import: from my_app import views",
"user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL,",
"path('profile/', user_views.profile, name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG: urlpatterns +=",
"please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import:",
"`urlpatterns` list routes URLs to views. For more information please",
"\"\"\" # Uncomment next two lines to enable admin: from",
"the admin: path('admin/', admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),",
"django.urls import path, include from users import views as user_views",
"an import: from my_app import views 2. Add a URL",
"from upload import views as upload_views from django.conf import settings",
"views.home, name='home') Class-based views 1. Add an import: from other_app.views",
"https://docs.djangoproject.com/en/2.1/topics/http/urls/ Examples: Function views 1. Add an import: from my_app",
"2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" #",
"a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf",
"path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/', user_views.profile,",
"name='profile'), path('book/',upload_views.book_list,name='book_list'), path('book/upload',upload_views.upload_book,name='upload_book'), ] if settings.DEBUG: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
"django.contrib.auth import views as auth_views from upload import views as",
"to urlpatterns: path('blog/', include('blog.urls')) \"\"\" # Uncomment next two lines",
"admin.site.urls), path('', include('blog.urls')), path('register/', user_views.register, name='register'), path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'), path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'), path('profile/',",
"urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the",
"django.contrib import admin from django.urls import path, include from users",
"users import views as user_views from django.contrib.auth import views as",
"routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.1/topics/http/urls/",
"path, include from users import views as user_views from django.contrib.auth",
"next line to enable the admin: path('admin/', admin.site.urls), path('', include('blog.urls')),",
"admin from django.urls import path, include from users import views"
] |
[
"os import click os.environ[\"GIT_PYTHON_REFRESH\"] = \"quiet\" @click.group() def git(): pass",
"import os import click os.environ[\"GIT_PYTHON_REFRESH\"] = \"quiet\" @click.group() def git():"
] |
[
"\"*.jpg_orig\"] file_list = list() for extension in extensions: file_list =",
"in extensions: file_list = file_list + glob.glob(extension) for file in",
"'') if file.endswith(new_extension): new_name = file.replace(new_extension, '') + \".jpg\" os.rename(file,",
"file_list = list() for extension in extensions: file_list = file_list",
"new_name = file.replace(new_extension, '') + \".jpg\" os.rename(file, new_name) print(\"Done!\") if",
"extension in extensions: new_extension = extension.replace('*', '') if file.endswith(new_extension): new_name",
"extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list = list() for extension",
"file_list: for extension in extensions: new_extension = extension.replace('*', '') if",
"extension.replace('*', '') if file.endswith(new_extension): new_name = file.replace(new_extension, '') + \".jpg\"",
"= [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list = list() for extension in",
"if file.endswith(new_extension): new_name = file.replace(new_extension, '') + \".jpg\" os.rename(file, new_name)",
"file.endswith(new_extension): new_name = file.replace(new_extension, '') + \".jpg\" os.rename(file, new_name) print(\"Done!\")",
"in extensions: new_extension = extension.replace('*', '') if file.endswith(new_extension): new_name =",
"file.replace(new_extension, '') + \".jpg\" os.rename(file, new_name) print(\"Done!\") if __name__ ==",
"import glob import os def main(): os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\",",
"+ \".jpg\" os.rename(file, new_name) print(\"Done!\") if __name__ == __name__: main()",
"\"*.png_large\", \"*.jpg_orig\"] file_list = list() for extension in extensions: file_list",
"main(): os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list = list()",
"glob.glob(extension) for file in file_list: for extension in extensions: new_extension",
"glob import os def main(): os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\", \"*.png_large\",",
"[\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list = list() for extension in extensions:",
"'') + \".jpg\" os.rename(file, new_name) print(\"Done!\") if __name__ == __name__:",
"= file_list + glob.glob(extension) for file in file_list: for extension",
"+ glob.glob(extension) for file in file_list: for extension in extensions:",
"import os def main(): os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"]",
"for extension in extensions: file_list = file_list + glob.glob(extension) for",
"os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list = list() for",
"file_list + glob.glob(extension) for file in file_list: for extension in",
"extension in extensions: file_list = file_list + glob.glob(extension) for file",
"= extension.replace('*', '') if file.endswith(new_extension): new_name = file.replace(new_extension, '') +",
"for extension in extensions: new_extension = extension.replace('*', '') if file.endswith(new_extension):",
"in file_list: for extension in extensions: new_extension = extension.replace('*', '')",
"for file in file_list: for extension in extensions: new_extension =",
"extensions: new_extension = extension.replace('*', '') if file.endswith(new_extension): new_name = file.replace(new_extension,",
"file in file_list: for extension in extensions: new_extension = extension.replace('*',",
"list() for extension in extensions: file_list = file_list + glob.glob(extension)",
"file_list = file_list + glob.glob(extension) for file in file_list: for",
"extensions: file_list = file_list + glob.glob(extension) for file in file_list:",
"os def main(): os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list",
"= file.replace(new_extension, '') + \".jpg\" os.rename(file, new_name) print(\"Done!\") if __name__",
"= list() for extension in extensions: file_list = file_list +",
"new_extension = extension.replace('*', '') if file.endswith(new_extension): new_name = file.replace(new_extension, '')",
"def main(): os.chdir(\"F:/Downloads\") extensions = [\"*.jpg_large\", \"*.png_large\", \"*.jpg_orig\"] file_list ="
] |
[
"smote = clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed']",
"{\"marital\": {\"married\": 1, \"single\": 0, \"divorced\":-1}, \"education\": {\"primary\": 1, \"secondary\":",
"= RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names =",
"sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test",
"1, \"no\": 0}, \"loan\": {\"yes\": 1, \"no\": 0}, \"y\": {\"yes\":",
"random_state = 0) sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train =",
"RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names = ['NOT-sub',",
"columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test",
"#RANDOM FOREST from sklearn.ensemble import RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt",
"= dataset.iloc[:, 0:7] y = dataset.iloc[:, 7] X = pd.get_dummies(X,",
"= pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train = downsampled.y X_train =",
"- RANDOM FOREST from imblearn.over_sampling import SMOTE y = dataset_sample.y",
"# setting up testing and training sets X_train, X_test, y_train,",
"0.25, random_state = 0) #SMOTE sm = SMOTE(random_state=27, ratio=1.0) X_train,",
"= dataset[dataset.education != 'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10))",
"dataset = dataset.drop(\"previous\", axis=1) dataset = dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE",
"'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train = downsampled.y X_train =",
"import pandas as pd # TRAINING - TEST from sklearn.model_selection",
"#COLLERATION MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show()",
"axis=1) #SELECTING TARGET CLASSES not_sub = X[X.y==0] sub = X[X.y==1]",
"y, test_size = 0.25, random_state = 0) X = pd.concat([X_train,",
"axis=1) dataset = dataset.drop(\"day\", axis=1) dataset = dataset.drop(\"month\", axis=1) dataset",
"dataset = dataset[dataset.job != 'unknown'] dataset = dataset[dataset.education != 'unknown']",
"sub = X[X.y==1] not_sub_downsampled = resample(not_sub, replace = False, n_samples",
"pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train = downsampled.y X_train = downsampled.drop('y',",
"dataset.drop(\"previous\", axis=1) dataset = dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums",
"import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)",
"sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) #",
"test_size = 0.25, random_state = 0) sm = SMOTE(random_state=27, ratio=1.0)",
"#UNDERSAMPLING from sklearn.utils import resample dataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"])",
"dataset.drop(\"month\", axis=1) dataset = dataset.drop(\"duration\", axis=1) dataset = dataset.drop(\"campaign\", axis=1)",
"import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size",
"y_train) # PREDICTION y_pred = classifier.predict(X_test) # CONFUSION MATRIX from",
"AND TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN",
"not_sub_downsampled = resample(not_sub, replace = False, n_samples = len(sub), random_state",
"{\"yes\": 1, \"no\": 0}, \"y\": {\"yes\": 1, \"no\": 0}} dataset.replace(cleanup_nums,",
"import SMOTE y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #",
"clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names))",
"esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from",
"#SPLIT FEATURE AND TARGET y = dataset_sample.y X = dataset_sample.drop('y',",
"confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS",
"[31,26,32], width=0.2, color='b', align='center', label='DT') plt.bar(x, [24,28,31], width=0.2, color='r', align='center',",
"dataset.drop(\"contact\", axis=1) dataset = dataset.drop(\"day\", axis=1) dataset = dataset.drop(\"month\", axis=1)",
"y_train, y_test = train_test_split(X, y, test_size=0.25) #DECISION TREE from sklearn",
"color='b', align='center', label='DT') plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1,",
"= classifier.predict(X_test) # CONFUSION MATRIX from sklearn.metrics import confusion_matrix cm",
"smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST from imblearn.over_sampling import SMOTE y",
"y = dataset.iloc[:, 7] X = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from",
"StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test)",
"DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub',",
"= downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier() clt_dt",
"y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier()",
"MINORITY AND DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION TREE",
"= dataset.drop(\"pdays\", axis=1) dataset = dataset.drop(\"previous\", axis=1) dataset = dataset.drop(\"poutcome\",",
"0) sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train)",
"axis=1) dataset = dataset.drop(\"previous\", axis=1) dataset = dataset.drop(\"poutcome\", axis=1) dataset.head()",
"target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from",
"PREDICTION y_pred = classifier.predict(X_test) # CONFUSION MATRIX from sklearn.metrics import",
"imblearn.over_sampling import SMOTE y = dataset_sample.y X = dataset_sample.drop('y', axis=1)",
"{\"yes\": 1, \"no\": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset =",
"= dataset[dataset.job != 'unknown'] dataset = dataset[dataset.education != 'unknown'] dataset['education']",
"MAJORITY downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train = downsampled.y",
"dataset.head() #FEATURE ENGINEERING cleanup_nums = {\"marital\": {\"married\": 1, \"single\": 0,",
"dataset = dataset.drop(\"duration\", axis=1) dataset = dataset.drop(\"campaign\", axis=1) dataset =",
"= pd.read_csv(input_file, sep=';', header = 0) dataset.head() #DELETE NEXT CALLS",
"import RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito =",
"plt.hist(esito) #RANDOM FOREST from sklearn.ensemble import RandomForestClassifier clf_dt = RandomForestClassifier()",
"!= 'unknown'] dataset = dataset[dataset.education != 'unknown'] dataset['education'] = dataset['education'].astype(int)",
"plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='lower right')",
"= train_test_split(X, y, test_size = 0.25, random_state = 0) #SMOTE",
"classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p =",
"numpy as np import pandas as pd import seaborn as",
"width=0.2, color='b', align='center', label='DT') plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF')",
"pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection import train_test_split X_train, X_test, y_train,",
"NEIGHBOURS import numpy as np import matplotlib.pyplot as plt import",
"ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier() #FIT",
"test_size=0.25) #DECISION TREE from sklearn import tree from sklearn.tree import",
"target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train =",
"dataset = dataset.drop(\"month\", axis=1) dataset = dataset.drop(\"duration\", axis=1) dataset =",
"X = dataset.iloc[:, 0:7] y = dataset.iloc[:, 7] X =",
"import resample dataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE AND",
"['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import",
"#RANDOM FOREST y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt",
"sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM",
"- TEST from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test",
"pd.read_csv(input_file, sep=';', header = 0) dataset.head() #DELETE NEXT CALLS DATA",
"print(cm) plt.hist(esito) #RANDOM FOREST from sklearn.ensemble import RandomForestClassifier clf_dt =",
"dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor,",
"esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE",
"dataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE AND TARGET y",
"1, \"secondary\": 2, \"tertiary\": 3}, \"default\": {\"yes\": 1, \"no\": 0},",
"plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT') plt.bar(x, [18,61,32], width=0.2, color='r',",
"\"no\": 0}, \"loan\": {\"yes\": 1, \"no\": 0}, \"y\": {\"yes\": 1,",
"sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt",
"= 0) X = pd.concat([X_train, y_train], axis=1) #SELECTING TARGET CLASSES",
"dataset.drop(\"pdays\", axis=1) dataset = dataset.drop(\"previous\", axis=1) dataset = dataset.drop(\"poutcome\", axis=1)",
"'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL x = np.arange(3) plt.bar(x-0.2,",
"= 5, metric = 'minkowski', p = 2) classifier.fit(X_train, y_train)",
"dataset.drop(\"duration\", axis=1) dataset = dataset.drop(\"campaign\", axis=1) dataset = dataset.drop(\"pdays\", axis=1)",
"0}, \"y\": {\"yes\": 1, \"no\": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes",
"random_state = 27) # COMBINE MINORITY AND DOWNSAMPLED MAJORITY downsampled",
"KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2) classifier.fit(X_train,",
"columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE AND TARGET y = dataset_sample.y X",
"align='center', label='DT') plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote'])",
"y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier()",
"cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS import",
"SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier()",
"= train_test_split(X, y, test_size=0.25) #DECISION TREE from sklearn import tree",
"0.25, random_state = 0) X = pd.concat([X_train, y_train], axis=1) #SELECTING",
"pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE AND TARGET y = dataset_sample.y",
"not_sub = X[X.y==0] sub = X[X.y==1] not_sub_downsampled = resample(not_sub, replace",
"color='b', align='center', label='DT') plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1,",
"from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X,",
"plt import pandas as pd # TRAINING - TEST from",
"align='center', label='DT') plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote'])",
"import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM FOREST",
"classifier.fit(X_train, y_train) # PREDICTION y_pred = classifier.predict(X_test) # CONFUSION MATRIX",
"5, metric = 'minkowski', p = 2) classifier.fit(X_train, y_train) #",
"align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on F1 x",
"target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL x",
"= 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';', header = 0) dataset.head()",
"= 0.25, random_state = 0) X = pd.concat([X_train, y_train], axis=1)",
"clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION",
"- DECISION TREE from imblearn.over_sampling import SMOTE #SPLIT FEATURE TARGET",
"y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)",
"sns import matplotlib.pyplot as plt from sklearn.metrics import classification_report #EVERY",
"#RECAP on F1 x = np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b',",
"= dataset.drop(\"month\", axis=1) dataset = dataset.drop(\"duration\", axis=1) dataset = dataset.drop(\"campaign\",",
"print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS import numpy as np import",
"['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL x = np.arange(3)",
"X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state",
"plt.show() #CLASSIFIFICATION X = dataset.iloc[:, 0:7] y = dataset.iloc[:, 7]",
"plt.hist(esito) # K-NEAREST NEIGHBOURS import numpy as np import matplotlib.pyplot",
"inplace=True) dataset.head() dataset.dtypes dataset = dataset[dataset.job != 'unknown'] dataset =",
"0}, \"loan\": {\"yes\": 1, \"no\": 0}, \"y\": {\"yes\": 1, \"no\":",
"['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on F1 x = np.arange(3) plt.bar(x-0.2,",
"pandas as pd import seaborn as sns import matplotlib.pyplot as",
"esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm)",
"print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train = downsampled.y X_train = downsampled.drop('y',",
"CLASSES not_sub = X[X.y==0] sub = X[X.y==1] not_sub_downsampled = resample(not_sub,",
"color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on F1",
"sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5, metric =",
"#SMOTE - DECISION TREE from imblearn.over_sampling import SMOTE #SPLIT FEATURE",
"\"divorced\":-1}, \"education\": {\"primary\": 1, \"secondary\": 2, \"tertiary\": 3}, \"default\": {\"yes\":",
"prefix=[\"job\"]) from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test =",
"y_train], axis=1) #SELECTING TARGET CLASSES not_sub = X[X.y==0] sub =",
"plt.legend(loc='upper right') #RECAP on F1 x = np.arange(3) plt.bar(x-0.2, [31,26,32],",
"downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train = downsampled.y X_train",
"DATASET IS RETRIEVED FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset =",
"y_test = train_test_split(X, y, test_size=0.25) #DECISION TREE from sklearn import",
"= 0) # SCALING from sklearn.preprocessing import StandardScaler sc =",
"= train_test_split(X, y, test_size = 0.25, random_state = 0) X",
"DECISION TREE from imblearn.over_sampling import SMOTE #SPLIT FEATURE TARGET y",
"downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier() clt_dt =",
"\"loan\": {\"yes\": 1, \"no\": 0}, \"y\": {\"yes\": 1, \"no\": 0}}",
"#SELECTING TARGET CLASSES not_sub = X[X.y==0] sub = X[X.y==1] not_sub_downsampled",
"= downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier() clt_dt",
"= dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test,",
"= 0.25, random_state = 0) sm = SMOTE(random_state=27, ratio=1.0) X_train,",
"train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =",
"p = 2) classifier.fit(X_train, y_train) # PREDICTION y_pred = classifier.predict(X_test)",
"plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT') plt.bar(x, [24,28,31], width=0.2, color='r',",
"SMOTE #SPLIT FEATURE TARGET y = dataset_sample.y X = dataset_sample.drop('y',",
"\"single\": 0, \"divorced\":-1}, \"education\": {\"primary\": 1, \"secondary\": 2, \"tertiary\": 3},",
"np import pandas as pd import seaborn as sns import",
"axis=1) dataset = dataset.drop(\"duration\", axis=1) dataset = dataset.drop(\"campaign\", axis=1) dataset",
"esito,target_names=target_names)) #RANDOM FOREST y_train = downsampled.y X_train = downsampled.drop('y', axis=1)",
"TREE from imblearn.over_sampling import SMOTE #SPLIT FEATURE TARGET y =",
"K-NEAREST NEIGHBOURS import numpy as np import matplotlib.pyplot as plt",
"y_pred = classifier.predict(X_test) # CONFUSION MATRIX from sklearn.metrics import confusion_matrix",
"y_train) clf_dt = DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred",
"3}, \"default\": {\"yes\": 1, \"no\": 0}, \"housing\": {\"yes\": 1, \"no\":",
"\"secondary\": 2, \"tertiary\": 3}, \"default\": {\"yes\": 1, \"no\": 0}, \"housing\":",
"train_test_split(X, y, test_size=0.25) #DECISION TREE from sklearn import tree from",
"X[X.y==1] not_sub_downsampled = resample(not_sub, replace = False, n_samples = len(sub),",
"esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM",
"GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';', header =",
"TREE from sklearn import tree from sklearn.tree import DecisionTreeClassifier clf_dt",
"['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train = downsampled.y X_train",
"= ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils",
"sklearn.metrics import classification_report #EVERY TIME THE DATASET IS RETRIEVED FROM",
"confusion_matrix(y_test, y_pred) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred)",
"sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =",
"X_train = downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train)",
"dataset = dataset.drop(\"contact\", axis=1) dataset = dataset.drop(\"day\", axis=1) dataset =",
"train_test_split(X, y, test_size = 0.25, random_state = 0) sm =",
"esito) print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS import numpy as np",
"confusion_matrix cm = confusion_matrix(y_test, y_pred) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test,",
"seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics import",
"print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL x = np.arange(3) plt.bar(x-0.2, [31,65,37],",
"smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL",
"esito,target_names=target_names)) #SMOTE - DECISION TREE from imblearn.over_sampling import SMOTE #SPLIT",
"\"housing\": {\"yes\": 1, \"no\": 0}, \"loan\": {\"yes\": 1, \"no\": 0},",
"sklearn.tree import DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito",
"pd import seaborn as sns import matplotlib.pyplot as plt from",
"X_test, y_train, y_test = train_test_split(X, y, test_size=0.25) #DECISION TREE from",
"= dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test, y_train, y_test =",
"CALLS DATA dataset = dataset.drop(\"contact\", axis=1) dataset = dataset.drop(\"day\", axis=1)",
"prefix=[\"job\"]) #SPLIT FEATURE AND TARGET y = dataset_sample.y X =",
"from sklearn.metrics import classification_report #EVERY TIME THE DATASET IS RETRIEVED",
"= confusion_matrix(y_test, esito) print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS import numpy",
"0) X = pd.concat([X_train, y_train], axis=1) #SELECTING TARGET CLASSES not_sub",
"dataset = dataset[dataset.education != 'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX",
"n_samples = len(sub), random_state = 27) # COMBINE MINORITY AND",
"dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums = {\"marital\": {\"married\": 1,",
"NEXT CALLS DATA dataset = dataset.drop(\"contact\", axis=1) dataset = dataset.drop(\"day\",",
"as pd import seaborn as sns import matplotlib.pyplot as plt",
"smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP",
"sklearn.ensemble import RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito",
"y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state =",
"dataset_sample.y X = dataset_sample.drop('y', axis=1) # setting up testing and",
"plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right')",
"= StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # FITTING",
"import matplotlib.pyplot as plt from sklearn.metrics import classification_report #EVERY TIME",
"0.25, random_state = 0) sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train",
"np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT') plt.bar(x, [18,61,32], width=0.2,",
"= 0.25, random_state = 0) # SCALING from sklearn.preprocessing import",
"confusion_matrix(y_test, esito) print(cm) plt.hist(esito) # K-NEAREST NEIGHBOURS import numpy as",
"= SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt =",
"tree from sklearn.tree import DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt =",
"y = dataset_sample.y X = dataset_sample.drop('y', axis=1) # setting up",
"FEATURE TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN",
"= confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM FOREST from sklearn.ensemble import",
"dataset.iloc[:, 0:7] y = dataset.iloc[:, 7] X = pd.get_dummies(X, columns=[\"job\"],",
"= pd.concat([X_train, y_train], axis=1) #SELECTING TARGET CLASSES not_sub = X[X.y==0]",
"resample(not_sub, replace = False, n_samples = len(sub), random_state = 27)",
"print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito)",
"as plt import pandas as pd # TRAINING - TEST",
"print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import resample dataset_sample = pd.get_dummies(dataset,",
"up testing and training sets X_train, X_test, y_train, y_test =",
"# PREDICTION y_pred = classifier.predict(X_test) # CONFUSION MATRIX from sklearn.metrics",
"= 0) #SMOTE sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train =",
"# K-NEAREST NEIGHBOURS import numpy as np import matplotlib.pyplot as",
"'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';', header = 0) dataset.head() #DELETE",
"cm = confusion_matrix(y_test, y_pred) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names))",
"= sc.fit_transform(X_train) X_test = sc.transform(X_test) # FITTING from sklearn.neighbors import",
"downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito =",
"len(sub), random_state = 27) # COMBINE MINORITY AND DOWNSAMPLED MAJORITY",
"= clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test,",
"'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import resample",
"training sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size",
"= {\"marital\": {\"married\": 1, \"single\": 0, \"divorced\":-1}, \"education\": {\"primary\": 1,",
"dataset.head() dataset.dtypes dataset = dataset[dataset.job != 'unknown'] dataset = dataset[dataset.education",
"plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import resample dataset_sample = pd.get_dummies(dataset, columns=[\"job\"],",
"matplotlib.pyplot as plt from sklearn.metrics import classification_report #EVERY TIME THE",
"plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X",
"X = pd.concat([X_train, y_train], axis=1) #SELECTING TARGET CLASSES not_sub =",
"import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test =",
"1, \"no\": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset = dataset[dataset.job",
"= train_test_split(X, y, test_size = 0.25, random_state = 0) #",
"y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN TEST X_train,",
"FOREST y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt =",
"= sc.transform(X_test) # FITTING from sklearn.neighbors import KNeighborsClassifier classifier =",
"label='DT') plt.bar(x, [18,61,32], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper",
"= dataset.iloc[:, 7] X = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection",
"= downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito",
"axis=1) dataset = dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums =",
"dataset.drop(\"day\", axis=1) dataset = dataset.drop(\"month\", axis=1) dataset = dataset.drop(\"duration\", axis=1)",
"= ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION TREE from",
"'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST from imblearn.over_sampling import",
"F1 x = np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT')",
"y, test_size = 0.25, random_state = 0) sm = SMOTE(random_state=27,",
"X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier() smote =",
"['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm =",
"imblearn.over_sampling import SMOTE #SPLIT FEATURE TARGET y = dataset_sample.y X",
"dataset = dataset.drop(\"pdays\", axis=1) dataset = dataset.drop(\"previous\", axis=1) dataset =",
"SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier()",
"2) classifier.fit(X_train, y_train) # PREDICTION y_pred = classifier.predict(X_test) # CONFUSION",
"= ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST from",
"\"education\": {\"primary\": 1, \"secondary\": 2, \"tertiary\": 3}, \"default\": {\"yes\": 1,",
"import numpy as np import matplotlib.pyplot as plt import pandas",
"print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import resample dataset_sample",
"resample dataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE AND TARGET",
"#DELETE NEXT CALLS DATA dataset = dataset.drop(\"contact\", axis=1) dataset =",
"y_train = sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier() #FIT smote =",
"right') #RECAP on F1 x = np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2,",
"Analysis/classification.py import numpy as np import pandas as pd import",
"dataset.drop(\"campaign\", axis=1) dataset = dataset.drop(\"pdays\", axis=1) dataset = dataset.drop(\"previous\", axis=1)",
"'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test,",
"TIME THE DATASET IS RETRIEVED FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv'",
"# CONFUSION MATRIX from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test,",
"DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test)",
"= 27) # COMBINE MINORITY AND DOWNSAMPLED MAJORITY downsampled =",
"on RECALL x = np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center',",
"dataset = pd.read_csv(input_file, sep=';', header = 0) dataset.head() #DELETE NEXT",
"target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix",
"smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE",
"0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset = dataset[dataset.job != 'unknown']",
"from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train)",
"RANDOM FOREST from imblearn.over_sampling import SMOTE y = dataset_sample.y X",
"#DECISION TREE from sklearn import tree from sklearn.tree import DecisionTreeClassifier",
"y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING from sklearn.utils import resample dataset_sample =",
"= dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor, annot=True,",
"dataset_sample.drop('y', axis=1) # setting up testing and training sets X_train,",
"#CLASSIFIFICATION X = dataset.iloc[:, 0:7] y = dataset.iloc[:, 7] X",
"from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) target_names =",
"smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test) target_names = ['NOT-sub',",
"\"no\": 0}, \"y\": {\"yes\": 1, \"no\": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head()",
"RETRIEVED FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';',",
"print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST from imblearn.over_sampling import SMOTE",
"#RECAP on RECALL x = np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b',",
"= dataset_sample.y X = dataset_sample.drop('y', axis=1) # setting up testing",
"as pd # TRAINING - TEST from sklearn.model_selection import train_test_split",
"import numpy as np import pandas as pd import seaborn",
"7] X = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection import train_test_split",
"# TRAINING - TEST from sklearn.model_selection import train_test_split X_train, X_test,",
"sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON",
"sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) target_names = ['NOT-sub',",
"pd # TRAINING - TEST from sklearn.model_selection import train_test_split X_train,",
"plt from sklearn.metrics import classification_report #EVERY TIME THE DATASET IS",
"from imblearn.over_sampling import SMOTE #SPLIT FEATURE TARGET y = dataset_sample.y",
"DATA dataset = dataset.drop(\"contact\", axis=1) dataset = dataset.drop(\"day\", axis=1) dataset",
"axis=1) # setting up testing and training sets X_train, X_test,",
"#FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test) target_names =",
"from sklearn.ensemble import RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train)",
"0) # SCALING from sklearn.preprocessing import StandardScaler sc = StandardScaler()",
"clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train",
"dataset[dataset.job != 'unknown'] dataset = dataset[dataset.education != 'unknown'] dataset['education'] =",
"= DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names =",
"= ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import confusion_matrix cm",
"X_train = downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train)",
"#SMOTE sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train)",
"= downsampled.drop('y', axis=1) clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito",
"axis=1) dataset = dataset.drop(\"campaign\", axis=1) dataset = dataset.drop(\"pdays\", axis=1) dataset",
"#EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB input_file =",
"sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y,",
"TARGET CLASSES not_sub = X[X.y==0] sub = X[X.y==1] not_sub_downsampled =",
"= sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train)",
"replace = False, n_samples = len(sub), random_state = 27) #",
"# SCALING from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train",
"sub]) #DECISION TREE y_train = downsampled.y X_train = downsampled.drop('y', axis=1)",
"RECALL x = np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT')",
"sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X = dataset.iloc[:, 0:7] y",
"y_pred) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm) plt.hist(y_pred) #UNDERSAMPLING",
"0) #SMOTE sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train,",
"TRAINING - TEST from sklearn.model_selection import train_test_split X_train, X_test, y_train,",
"#FEATURE ENGINEERING cleanup_nums = {\"marital\": {\"married\": 1, \"single\": 0, \"divorced\":-1},",
"\"tertiary\": 3}, \"default\": {\"yes\": 1, \"no\": 0}, \"housing\": {\"yes\": 1,",
"= clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics",
"= 0) dataset.head() #DELETE NEXT CALLS DATA dataset = dataset.drop(\"contact\",",
"numpy as np import matplotlib.pyplot as plt import pandas as",
"dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset = dataset[dataset.job != 'unknown'] dataset",
"'unknown'] dataset = dataset[dataset.education != 'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION",
"y, test_size=0.25) #DECISION TREE from sklearn import tree from sklearn.tree",
"clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test,",
"smote_pred,target_names=target_names)) #RECAP on RECALL x = np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2,",
"['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION TREE from imblearn.over_sampling",
"dataset = dataset.drop(\"campaign\", axis=1) dataset = dataset.drop(\"pdays\", axis=1) dataset =",
"label='DT') plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='lower",
"= 2) classifier.fit(X_train, y_train) # PREDICTION y_pred = classifier.predict(X_test) #",
"sc.transform(X_test) # FITTING from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors",
"np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT') plt.bar(x, [24,28,31], width=0.2,",
"= smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE -",
"dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X = dataset.iloc[:, 0:7]",
"'minkowski', p = 2) classifier.fit(X_train, y_train) # PREDICTION y_pred =",
"as plt from sklearn.metrics import classification_report #EVERY TIME THE DATASET",
"as np import matplotlib.pyplot as plt import pandas as pd",
"MATRIX from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred) target_names",
"from sklearn.utils import resample dataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT",
"classifier.predict(X_test) # CONFUSION MATRIX from sklearn.metrics import confusion_matrix cm =",
"[18,61,32], width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP",
"downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito =",
"# COMBINE MINORITY AND DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled, sub])",
"MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION",
"= KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)",
"X[X.y==0] sub = X[X.y==1] not_sub_downsampled = resample(not_sub, replace = False,",
"y_train) clf_dt = RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test)",
"classification_report #EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB input_file",
"clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) from sklearn.metrics import",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25) #DECISION TREE",
"\"default\": {\"yes\": 1, \"no\": 0}, \"housing\": {\"yes\": 1, \"no\": 0},",
"cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM FOREST from sklearn.ensemble",
"confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM FOREST from",
"= 0.25, random_state = 0) #SMOTE sm = SMOTE(random_state=27, ratio=1.0)",
"['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST from imblearn.over_sampling",
"1, \"single\": 0, \"divorced\":-1}, \"education\": {\"primary\": 1, \"secondary\": 2, \"tertiary\":",
"label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on F1 x =",
"X = dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test, y_train, y_test",
"THE DATASET IS RETRIEVED FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset",
"from sklearn.tree import DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train)",
"FOREST from imblearn.over_sampling import SMOTE y = dataset_sample.y X =",
"testing and training sets X_train, X_test, y_train, y_test = train_test_split(X,",
"= 'minkowski', p = 2) classifier.fit(X_train, y_train) # PREDICTION y_pred",
"import seaborn as sns import matplotlib.pyplot as plt from sklearn.metrics",
"= dataset.drop(\"campaign\", axis=1) dataset = dataset.drop(\"pdays\", axis=1) dataset = dataset.drop(\"previous\",",
"dataset = dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums = {\"marital\":",
"clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed']",
"train_test_split(X, y, test_size = 0.25, random_state = 0) X =",
"\"y\": {\"yes\": 1, \"no\": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset",
"= np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT') plt.bar(x, [24,28,31],",
"y, test_size = 0.25, random_state = 0) #SMOTE sm =",
"KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p",
"= False, n_samples = len(sub), random_state = 27) # COMBINE",
"x = np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center', label='DT') plt.bar(x,",
"DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test) target_names",
"#PREDICITON smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names))",
"{\"primary\": 1, \"secondary\": 2, \"tertiary\": 3}, \"default\": {\"yes\": 1, \"no\":",
"clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names))",
"FEATURE AND TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1)",
"sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #",
"dataset = dataset.drop(\"day\", axis=1) dataset = dataset.drop(\"month\", axis=1) dataset =",
"= clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE -",
"clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names",
"axis=1) dataset = dataset.drop(\"pdays\", axis=1) dataset = dataset.drop(\"previous\", axis=1) dataset",
"dataset[dataset.education != 'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor",
"FOREST from sklearn.ensemble import RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt =",
"test_size = 0.25, random_state = 0) # SCALING from sklearn.preprocessing",
"ratio=1.0) X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier() smote",
"= sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred",
"= confusion_matrix(y_test, y_pred) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, y_pred,target_names=target_names)) print(cm)",
"target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM FOREST",
"= pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE AND TARGET y =",
"1, \"no\": 0}, \"housing\": {\"yes\": 1, \"no\": 0}, \"loan\": {\"yes\":",
"dataset.head() #DELETE NEXT CALLS DATA dataset = dataset.drop(\"contact\", axis=1) dataset",
"0, \"divorced\":-1}, \"education\": {\"primary\": 1, \"secondary\": 2, \"tertiary\": 3}, \"default\":",
"on F1 x = np.arange(3) plt.bar(x-0.2, [31,26,32], width=0.2, color='b', align='center',",
"1, \"no\": 0}, \"y\": {\"yes\": 1, \"no\": 0}} dataset.replace(cleanup_nums, inplace=True)",
"train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25) #DECISION",
"TREE y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt =",
"{\"yes\": 1, \"no\": 0}, \"loan\": {\"yes\": 1, \"no\": 0}, \"y\":",
"#TRAIN TEST X_train, X_test, y_train, y_test = train_test_split(X, y, test_size",
"#SPLIT FEATURE TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1)",
"train_test_split(X, y, test_size = 0.25, random_state = 0) #SMOTE sm",
"confusion_matrix(y_test, esito) print(cm) plt.hist(esito) #RANDOM FOREST from sklearn.ensemble import RandomForestClassifier",
"test_size = 0.25, random_state = 0) #SMOTE sm = SMOTE(random_state=27,",
"0) dataset.head() #DELETE NEXT CALLS DATA dataset = dataset.drop(\"contact\", axis=1)",
"metric = 'minkowski', p = 2) classifier.fit(X_train, y_train) # PREDICTION",
"dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)",
"axis=1) clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test)",
"{\"married\": 1, \"single\": 0, \"divorced\":-1}, \"education\": {\"primary\": 1, \"secondary\": 2,",
"axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums = {\"marital\": {\"married\": 1, \"single\":",
"0}, \"housing\": {\"yes\": 1, \"no\": 0}, \"loan\": {\"yes\": 1, \"no\":",
"import DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito =",
"#SMOTE - RANDOM FOREST from imblearn.over_sampling import SMOTE y =",
"FITTING from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5,",
"{\"yes\": 1, \"no\": 0}, \"housing\": {\"yes\": 1, \"no\": 0}, \"loan\":",
"smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #SMOTE - RANDOM",
"= dataset_sample.drop('y', axis=1) # setting up testing and training sets",
"CONFUSION MATRIX from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_pred)",
"= pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection import train_test_split X_train, X_test,",
"= dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE ENGINEERING cleanup_nums = {\"marital\": {\"married\":",
"= 0) sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train = sm.fit_sample(X_train,",
"import confusion_matrix cm = confusion_matrix(y_test, y_pred) target_names = ['NOT-sub', 'Subscribed']",
"cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X = dataset.iloc[:, 0:7] y = dataset.iloc[:,",
"0:7] y = dataset.iloc[:, 7] X = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"])",
"matplotlib.pyplot as plt import pandas as pd # TRAINING -",
"[31,65,37], width=0.2, color='b', align='center', label='DT') plt.bar(x, [18,61,32], width=0.2, color='r', align='center',",
"IS RETRIEVED FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file,",
"TARGET y = dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN TEST",
"= resample(not_sub, replace = False, n_samples = len(sub), random_state =",
"False, n_samples = len(sub), random_state = 27) # COMBINE MINORITY",
"input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';', header = 0)",
"= DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test)",
"X_test = sc.transform(X_test) # FITTING from sklearn.neighbors import KNeighborsClassifier classifier",
"= clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST",
"= ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on RECALL x =",
"COMBINE MINORITY AND DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION",
"cleanup_nums = {\"marital\": {\"married\": 1, \"single\": 0, \"divorced\":-1}, \"education\": {\"primary\":",
"!= 'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor =",
"sep=';', header = 0) dataset.head() #DELETE NEXT CALLS DATA dataset",
"TEST X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =",
"= dataset.drop(\"duration\", axis=1) dataset = dataset.drop(\"campaign\", axis=1) dataset = dataset.drop(\"pdays\",",
"DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train =",
"annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X = dataset.iloc[:, 0:7] y =",
"y_train = sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier() smote = clf_dt.fit(X_train,y_train)",
"pd.concat([X_train, y_train], axis=1) #SELECTING TARGET CLASSES not_sub = X[X.y==0] sub",
"y, test_size = 0.25, random_state = 0) # SCALING from",
"= smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, smote_pred,target_names=target_names)) #RECAP on",
"np import matplotlib.pyplot as plt import pandas as pd #",
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25,",
"import SMOTE #SPLIT FEATURE TARGET y = dataset_sample.y X =",
"= RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names =",
"dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test, y_train, y_test = train_test_split(X,",
"AND DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled, sub]) #DECISION TREE y_train",
"RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub',",
"import matplotlib.pyplot as plt import pandas as pd # TRAINING",
"setting up testing and training sets X_train, X_test, y_train, y_test",
"test_size = 0.25, random_state = 0) X = pd.concat([X_train, y_train],",
"= len(sub), random_state = 27) # COMBINE MINORITY AND DOWNSAMPLED",
"random_state = 0) X = pd.concat([X_train, y_train], axis=1) #SELECTING TARGET",
"cor = dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X =",
"import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski',",
"<filename>Data Analysis/classification.py import numpy as np import pandas as pd",
"X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # FITTING from sklearn.neighbors",
"width=0.2, color='r', align='center', label='RF') plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on",
"dataset_sample.y X = dataset_sample.drop('y', axis=1) #TRAIN TEST X_train, X_test, y_train,",
"width=0.2, color='b', align='center', label='DT') plt.bar(x, [24,28,31], width=0.2, color='r', align='center', label='RF')",
"'unknown'] dataset['education'] = dataset['education'].astype(int) #COLLERATION MATRIX plt.figure(figsize=(12,10)) cor = dataset.corr()",
"= dataset.drop(\"day\", axis=1) dataset = dataset.drop(\"month\", axis=1) dataset = dataset.drop(\"duration\",",
"downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt = DecisionTreeClassifier() clt_dt =",
"= clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test,",
"= dataset.drop(\"previous\", axis=1) dataset = dataset.drop(\"poutcome\", axis=1) dataset.head() #FEATURE ENGINEERING",
"SMOTE y = dataset_sample.y X = dataset_sample.drop('y', axis=1) # setting",
"sm.fit_sample(X_train, y_train) clf_dt = RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred =",
"and training sets X_train, X_test, y_train, y_test = train_test_split(X, y,",
"= ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #RANDOM FOREST y_train = downsampled.y",
"from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito)",
"'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION TREE from imblearn.over_sampling import",
"27) # COMBINE MINORITY AND DOWNSAMPLED MAJORITY downsampled = pd.concat([not_sub_downsampled,",
"dataset.iloc[:, 7] X = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection import",
"random_state = 0) #SMOTE sm = SMOTE(random_state=27, ratio=1.0) X_train, y_train",
"= X[X.y==0] sub = X[X.y==1] not_sub_downsampled = resample(not_sub, replace =",
"import confusion_matrix cm = confusion_matrix(y_test, esito) print(cm) plt.hist(esito) # K-NEAREST",
"StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) # FITTING from",
"as np import pandas as pd import seaborn as sns",
"axis=1) #TRAIN TEST X_train, X_test, y_train, y_test = train_test_split(X, y,",
"clf_dt = DecisionTreeClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test) target_names",
"sklearn import tree from sklearn.tree import DecisionTreeClassifier clf_dt = DecisionTreeClassifier()",
"print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION TREE from imblearn.over_sampling import SMOTE",
"X_train, y_train = sm.fit_sample(X_train, y_train) clf_dt = DecisionTreeClassifier() #FIT smote",
"clf_dt = RandomForestClassifier() smote = clf_dt.fit(X_train,y_train) smote_pred = smote.predict(X_test) target_names",
"axis=1) dataset = dataset.drop(\"month\", axis=1) dataset = dataset.drop(\"duration\", axis=1) dataset",
"\"no\": 0}} dataset.replace(cleanup_nums, inplace=True) dataset.head() dataset.dtypes dataset = dataset[dataset.job !=",
"= dataset.corr() sns.heatmap(cor, annot=True, cmap=plt.cm.Reds) plt.show() #CLASSIFIFICATION X = dataset.iloc[:,",
"= X[X.y==1] not_sub_downsampled = resample(not_sub, replace = False, n_samples =",
"= np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT') plt.bar(x, [18,61,32],",
"X = pd.get_dummies(X, columns=[\"job\"], prefix=[\"job\"]) from sklearn.model_selection import train_test_split X_train,",
"= clf_dt.fit(X_train,y_train) #PREDICITON smote_pred = smote.predict(X_test) target_names = ['NOT-sub', 'Subscribed']",
"dataset.dtypes dataset = dataset[dataset.job != 'unknown'] dataset = dataset[dataset.education !=",
"sklearn.utils import resample dataset_sample = pd.get_dummies(dataset, columns=[\"job\"], prefix=[\"job\"]) #SPLIT FEATURE",
"X = dataset_sample.drop('y', axis=1) # setting up testing and training",
"as sns import matplotlib.pyplot as plt from sklearn.metrics import classification_report",
"TEST from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test =",
"train_test_split(X, y, test_size = 0.25, random_state = 0) # SCALING",
"0.25, random_state = 0) # SCALING from sklearn.preprocessing import StandardScaler",
"FROM GITHUB input_file = 'https://raw.githubusercontent.com/lcphy/Digital-Innovation-Lab/master/bank-full.csv' dataset = pd.read_csv(input_file, sep=';', header",
"\"no\": 0}, \"housing\": {\"yes\": 1, \"no\": 0}, \"loan\": {\"yes\": 1,",
"SCALING from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train =",
"target_names = ['NOT-sub', 'Subscribed'] print(classification_report(y_test, esito,target_names=target_names)) #SMOTE - DECISION TREE",
"pandas as pd # TRAINING - TEST from sklearn.model_selection import",
"from imblearn.over_sampling import SMOTE y = dataset_sample.y X = dataset_sample.drop('y',",
"# FITTING from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors =",
"= train_test_split(X, y, test_size = 0.25, random_state = 0) sm",
"esito) print(cm) plt.hist(esito) #RANDOM FOREST from sklearn.ensemble import RandomForestClassifier clf_dt",
"= dataset.drop(\"contact\", axis=1) dataset = dataset.drop(\"day\", axis=1) dataset = dataset.drop(\"month\",",
"2, \"tertiary\": 3}, \"default\": {\"yes\": 1, \"no\": 0}, \"housing\": {\"yes\":",
"from sklearn import tree from sklearn.tree import DecisionTreeClassifier clf_dt =",
"random_state = 0) # SCALING from sklearn.preprocessing import StandardScaler sc",
"import tree from sklearn.tree import DecisionTreeClassifier clf_dt = DecisionTreeClassifier() clt_dt",
"RandomForestClassifier clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test)",
"plt.xticks(x-0.1, ['Normal','Under','Smote']) plt.legend(loc='upper right') #RECAP on F1 x = np.arange(3)",
"#DECISION TREE y_train = downsampled.y X_train = downsampled.drop('y', axis=1) clf_dt",
"header = 0) dataset.head() #DELETE NEXT CALLS DATA dataset =",
"from sklearn.neighbors import KNeighborsClassifier classifier = KNeighborsClassifier(n_neighbors = 5, metric",
"axis=1) clf_dt = RandomForestClassifier() clt_dt = clf_dt.fit(X_train,y_train) esito = clf_dt.predict(X_test)",
"clf_dt = DecisionTreeClassifier() #FIT smote = clf_dt.fit(X_train,y_train) #PREDICITON smote_pred =",
"x = np.arange(3) plt.bar(x-0.2, [31,65,37], width=0.2, color='b', align='center', label='DT') plt.bar(x,",
"sc.fit_transform(X_train) X_test = sc.transform(X_test) # FITTING from sklearn.neighbors import KNeighborsClassifier",
"import pandas as pd import seaborn as sns import matplotlib.pyplot",
"ENGINEERING cleanup_nums = {\"marital\": {\"married\": 1, \"single\": 0, \"divorced\":-1}, \"education\":",
"import classification_report #EVERY TIME THE DATASET IS RETRIEVED FROM GITHUB"
] |
[
"subnets = properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1, len(subnets), \"There should",
"2.0 (the \"License\"); # you may not use this file",
"[ { 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name",
"only be one subnet\") subnet = subnets[0] self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), \"Incorrect",
"2015-2018 Capital One Services, LLC # # Licensed under the",
"limitations under the License. from azure_common import BaseTest, arm_template class",
"resources): self.assertEqual(len(resources), 1, \"Only one route table should be found\")",
"'value': 'not-null' }, { 'type': 'value', 'key': 'length(properties.subnets)', 'op': 'eq',",
"permissions and # limitations under the License. from azure_common import",
"'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' },",
"found\") properties = route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\") subnets = properties.get('subnets')",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"_subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with self.sign_out_patch(): p =",
"and # limitations under the License. from azure_common import BaseTest,",
"RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }, { 'type': 'value', 'key': 'length(properties.subnets)',",
"'value', 'key': 'length(properties.subnets)', 'op': 'eq', 'value': 1 } ] })",
"language governing permissions and # limitations under the License. from",
"\"Missing properties\") subnets = properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1, len(subnets),",
"| [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' } ] }) resources",
"'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' } ]",
"test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [",
"= p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name':",
"use this file except in compliance with the License. #",
"{ 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ),",
"}, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p = self.load_policy({ 'name':",
"self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource':",
"'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p",
"'{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with self.sign_out_patch(): p = self.load_policy({ 'name':",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"\"Missing subnets\") self.assertEqual(1, len(subnets), \"There should only be one subnet\")",
"Copyright 2015-2018 Capital One Services, LLC # # Licensed under",
"License. # You may obtain a copy of the License",
"found\") route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong route table",
"under the License is distributed on an \"AS IS\" BASIS,",
"with self.sign_out_patch(): p = self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable' },",
"License for the specific language governing permissions and # limitations",
"disallowed_subnet_name = 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def",
"'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, {",
"}, { 'type': 'value', 'key': 'length(properties.subnets)', 'op': 'eq', 'value': 1",
"route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong route table was",
"wrong route table was found\") properties = route_table.get('properties') self.assertIsNotNone(properties, \"Missing",
"'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key':",
"self.assertEqual(len(resources), 0, \"A route table is routing to a disallowed",
"was found\") properties = route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\") subnets =",
"resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong route table was found\") properties",
"in compliance with the License. # You may obtain a",
"def test_find_route_table_by_name(self): p = self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters':",
"software # distributed under the License is distributed on an",
"p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, \"Only one route",
"'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2'",
"] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p",
"} ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self):",
"_assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, \"Only one route table should be",
"One Services, LLC # # Licensed under the Apache License,",
"), 'value': 'not-null' }, { 'type': 'value', 'key': 'length(properties.subnets)', 'op':",
"= p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name':",
"] }) resources = p.run() self.assertEqual(len(resources), 0, \"A route table",
"\"There should only be one subnet\") subnet = subnets[0] self.assertIn(RouteTableTest.allowed_subnet_name,",
"[0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' } ] }) resources =",
"[0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }, { 'type': 'value', 'key':",
"RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1'",
"'op': 'eq', 'value': RouteTableTest.route_table_name } ] }) resources = p.run()",
"def test_route_table_schema_validate(self): with self.sign_out_patch(): p = self.load_policy({ 'name': 'test-azure-route-table', 'resource':",
"'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type':",
"'name': 'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self):",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource':",
"to in writing, software # distributed under the License is",
"p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',",
"# See the License for the specific language governing permissions",
"test_find_route_table_by_name(self): p = self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [",
"}) resources = p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1,",
"return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with self.sign_out_patch(): p = self.load_policy({",
"or agreed to in writing, software # distributed under the",
"'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key':",
"required by applicable law or agreed to in writing, software",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name } ] }) resources",
"with the License. # You may obtain a copy of",
"p = self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True) self.assertTrue(p)",
"'name', 'op': 'eq', 'value': RouteTableTest.route_table_name } ] }) resources =",
"self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value',",
"), 'value': 'not-null' } ] }) resources = p.run() self.assertEqual(len(resources),",
"LLC # # Licensed under the Apache License, Version 2.0",
"resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({",
"compliance with the License. # You may obtain a copy",
"agreed to in writing, software # distributed under the License",
"distributed under the License is distributed on an \"AS IS\"",
"vnet_name = 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod",
"self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1, len(subnets), \"There should only be one",
"= 'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name =",
"a disallowed subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name':",
"express or implied. # See the License for the specific",
"'resource': 'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p =",
"from azure_common import BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable'",
"except in compliance with the License. # You may obtain",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"{ 'type': 'value', 'key': 'length(properties.subnets)', 'op': 'eq', 'value': 1 }",
"} ] }) resources = p.run() self.assertEqual(len(resources), 0, \"A route",
"not use this file except in compliance with the License.",
"p.run() self.assertEqual(len(resources), 0, \"A route table is routing to a",
"writing, software # distributed under the License is distributed on",
"you may not use this file except in compliance with",
"| [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' } ] }) resources",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet):",
"'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value':",
"'not-null' }, { 'type': 'value', 'key': 'length(properties.subnets)', 'op': 'eq', 'value':",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key':",
"p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [ {",
"len(subnets), \"There should only be one subnet\") subnet = subnets[0]",
"= self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json')",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"# limitations under the License. from azure_common import BaseTest, arm_template",
"'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id,",
"route table was found\") properties = route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\")",
"table was found\") properties = route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\") subnets",
"'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name',",
"'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name', 'op':",
"\"A route table is routing to a disallowed subnet\") @arm_template('route-table-and-vnet.json')",
"route table should be found\") route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),",
"1 } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self,",
"self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value',",
"RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' } ] }) resources = p.run()",
"be found\") route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong route",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"0, \"A route table is routing to a disallowed subnet\")",
"the License is distributed on an \"AS IS\" BASIS, #",
"def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with self.sign_out_patch(): p",
"table is routing to a disallowed subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self):",
"'value': 1 } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) def",
"properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1, len(subnets), \"There should only be",
"= resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong route table was found\")",
"p = self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [ {",
"\"Only one route table should be found\") route_table = resources[0]",
"= p.run() self.assertEqual(len(resources), 0, \"A route table is routing to",
"test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [",
"should be found\") route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong",
"'eq', 'value': RouteTableTest.route_table_name } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources)",
"law or agreed to in writing, software # distributed under",
"one route table should be found\") route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name,",
"@staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with self.sign_out_patch():",
"self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p = self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource':",
"test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [",
"@arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable',",
"under the License. from azure_common import BaseTest, arm_template class RouteTableTest(BaseTest):",
"= p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, \"Only one",
"self.assertIsNotNone(properties, \"Missing properties\") subnets = properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1,",
"may obtain a copy of the License at # #",
"route_table_name = 'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name",
"'length(properties.subnets)', 'op': 'eq', 'value': 1 } ] }) resources =",
"'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' } ]",
"= 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name,",
"self.assertEqual(1, len(subnets), \"There should only be one subnet\") subnet =",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"}) resources = p.run() self.assertEqual(len(resources), 0, \"A route table is",
"may not use this file except in compliance with the",
"'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name', 'op': 'eq',",
"= self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type':",
"'value': 'not-null' } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json')",
"[0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' } ] }) resources =",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [ {",
"this file except in compliance with the License. # You",
"resources = p.run() self.assertEqual(len(resources), 0, \"A route table is routing",
"route table is routing to a disallowed subnet\") @arm_template('route-table-and-vnet.json') def",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name } ]",
"# # Licensed under the Apache License, Version 2.0 (the",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null'",
"Capital One Services, LLC # # Licensed under the Apache",
"'not-null' } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"= 'ccroutetablevnet' allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod def",
"'not-null' } ] }) resources = p.run() self.assertEqual(len(resources), 0, \"A",
"'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }",
"'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name',",
"] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p",
"'type': 'value', 'key': 'length(properties.subnets)', 'op': 'eq', 'value': 1 } ]",
"'key': 'length(properties.subnets)', 'op': 'eq', 'value': 1 } ] }) resources",
"] }) resources = p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources),",
"}) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p =",
"}, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)",
"properties\") subnets = properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1, len(subnets), \"There",
"@arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable',",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"import BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name =",
"or implied. # See the License for the specific language",
"def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, \"Only one route table should",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"arm_template class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name",
"subnet) def test_route_table_schema_validate(self): with self.sign_out_patch(): p = self.load_policy({ 'name': 'test-azure-route-table',",
"'value': 'not-null' } ] }) resources = p.run() self.assertEqual(len(resources), 0,",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"\\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }, { 'type':",
"disallowed subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets',",
"self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def",
"self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value',",
"@arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable',",
"= properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\") self.assertEqual(1, len(subnets), \"There should only",
"(the \"License\"); # you may not use this file except",
"resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({",
"Services, LLC # # Licensed under the Apache License, Version",
"# you may not use this file except in compliance",
"'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }, {",
"} ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources):",
"= route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\") subnets = properties.get('subnets') self.assertIsNotNone(subnets, \"Missing",
"class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name = 'ccroutetablevnet' allowed_subnet_name =",
"self.sign_out_patch(): p = self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable' }, validate=True)",
"'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null'",
"}) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p =",
"# # Unless required by applicable law or agreed to",
"table should be found\") route_table = resources[0] self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"Version 2.0 (the \"License\"); # you may not use this",
"'value': RouteTableTest.route_table_name } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json')",
"'filters': [ { 'type': 'value', 'key': 'name', 'op': 'eq', 'value':",
"# Copyright 2015-2018 Capital One Services, LLC # # Licensed",
"implied. # See the License for the specific language governing",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"be one subnet\") subnet = subnets[0] self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), \"Incorrect subnet\")",
"the License. from azure_common import BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name",
"BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name = 'ccroutetablevnet'",
"by applicable law or agreed to in writing, software #",
"= self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [ { 'type':",
"self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value',",
"to a disallowed subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({",
"1, \"Only one route table should be found\") route_table =",
"def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource': 'azure.routetable', 'filters':",
"def test_detect_route_table_not_routing_to_incorrect_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters':",
"allowed_subnet_name = 'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return",
"self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, \"Only one route table",
"p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [ {",
"self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet', 'resource':",
"'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name',",
"self.assertEqual(len(resources), 1, \"Only one route table should be found\") route_table",
"should only be one subnet\") subnet = subnets[0] self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'),",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key':",
"azure_common import BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name = 'cctestroutetable' vnet_name",
"Unless required by applicable law or agreed to in writing,",
"@arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p = self.load_policy({ 'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable',",
"resources = p.run() self._assert_only_route_table_in_resources(resources) def _assert_only_route_table_in_resources(self, resources): self.assertEqual(len(resources), 1, \"Only",
"'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] |",
"the specific language governing permissions and # limitations under the",
"'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key': 'name',",
"'azure.routetable' }, validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p = self.load_policy({",
"applicable law or agreed to in writing, software # distributed",
"{ 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name },",
"'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')]",
"routing to a disallowed subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p =",
"License. from azure_common import BaseTest, arm_template class RouteTableTest(BaseTest): route_table_name =",
"RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' } ] }) resources = p.run()",
"in writing, software # distributed under the License is distributed",
"'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' }",
"RouteTableTest.route_table_name } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"License, Version 2.0 (the \"License\"); # you may not use",
"'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value':",
"# You may obtain a copy of the License at",
"} ] }) resources = p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self):",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"test_route_table_schema_validate(self): with self.sign_out_patch(): p = self.load_policy({ 'name': 'test-azure-route-table', 'resource': 'azure.routetable'",
"= 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self):",
"}, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)",
"the License for the specific language governing permissions and #",
"route_table.get('name'), \"The wrong route table was found\") properties = route_table.get('properties')",
"def test_detect_route_only_routes_to_specific_subnets(self): p = self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters':",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"either express or implied. # See the License for the",
"{ 'type': 'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }",
"), 'value': 'not-null' } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources)",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"subnets\") self.assertEqual(1, len(subnets), \"There should only be one subnet\") subnet",
"\\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name) ), 'value': 'not-null' } ] })",
"governing permissions and # limitations under the License. from azure_common",
"= self.load_policy({ 'name': 'test-detect-route-table-not-routing-to-incorrect-subnet', 'resource': 'azure.routetable', 'filters': [ { 'type':",
"p.run() self._assert_only_route_table_in_resources(resources) @arm_template('route-table-and-vnet.json') def test_detect_route_table_is_routing_to_correct_subnet(self): p = self.load_policy({ 'name': 'test-detect-route-table-is-routing-to-correct-subnet',",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"RouteTableTest.route_table_name }, { 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format(",
"{ 'type': 'value', 'key': 'properties.subnets[?ends_with(id, \\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ),",
"| [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' }, { 'type': 'value',",
"'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet) def test_route_table_schema_validate(self): with",
"route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\") subnets = properties.get('subnets') self.assertIsNotNone(subnets, \"Missing subnets\")",
"\"License\"); # you may not use this file except in",
"= self.load_policy({ 'name': 'test-detect-route-only-routes-to-specific-subnets', 'resource': 'azure.routetable', 'filters': [ { 'type':",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"# distributed under the License is distributed on an \"AS",
"self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'), \"The wrong route table was found\") properties =",
"# Unless required by applicable law or agreed to in",
"\"The wrong route table was found\") properties = route_table.get('properties') self.assertIsNotNone(properties,",
"validate=True) self.assertTrue(p) @arm_template('route-table-and-vnet.json') def test_find_route_table_by_name(self): p = self.load_policy({ 'name': 'test-find-route-table-by-name',",
"properties = route_table.get('properties') self.assertIsNotNone(properties, \"Missing properties\") subnets = properties.get('subnets') self.assertIsNotNone(subnets,",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"'value', 'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name } ] })",
"'eq', 'value': 1 } ] }) resources = p.run() self._assert_only_route_table_in_resources(resources)",
"You may obtain a copy of the License at #",
"'name': 'test-find-route-table-by-name', 'resource': 'azure.routetable', 'filters': [ { 'type': 'value', 'key':",
"'key': 'name', 'op': 'eq', 'value': RouteTableTest.route_table_name }, { 'type': 'value',",
"\\'{}\\')] | [0]'.format( RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name) ), 'value': 'not-null' } ] })",
"'op': 'eq', 'value': 1 } ] }) resources = p.run()",
"'cctestsubnet1' disallowed_subnet_name = 'cctestsubnet2' @staticmethod def _subnet_id_suffix(subnet): return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"is routing to a disallowed subnet\") @arm_template('route-table-and-vnet.json') def test_detect_route_only_routes_to_specific_subnets(self): p"
"& DEPENDENCIES # ################################################################################ import hashlib import logging import json",
"# ################################################################################ import hashlib import logging import json from collections",
"payload context (State): The current state of the ledger Returns:",
"uri_list) elif action == \"AddArtifact\" or action == \"AddURI\": if",
"# LIBRARIES & DEPENDENCIES # ################################################################################ import hashlib import logging",
"of the artifact artifact_checksum (str): The checksum of the artifact",
"(default []) uri_list (list of dict): The list of the",
"= payload[\"cur_block\"] timestamp = payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"] uri_list =",
"\"\"\" type: list of str Returns the family version of",
"State.data failed \"\"\" # Parsing required fields from transaction payload",
"2.0 (the \"License\"); # you may not use this file",
"the handler object. \"\"\" return [\"1.0\"] @property def encodings(self): \"\"\"",
"namespace_prefix): \"\"\" Constructs the ArtifactTransactionHandler object. Args: namespace_prefix (str): The",
"ArtifactTransactionHandler: \"\"\" Class for handling the Transaction Family : Artifact",
"prefix. \"\"\" return namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\"",
"If \"Add...\" were called on non-existing uuid * If invalid",
"The UTC time for when the transaction was submitted artifact_list",
"payload[\"prev_block\"] cur = payload[\"cur_block\"] timestamp = payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"]",
"the artifact (default []) uri_list (list of dict): The list",
"artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev,",
"of the artifact artifact_type (str): The type of the artifact",
"= len(msg) msg = [msg] LOGGER.debug(\"+\" + (length + 2)",
"* If deserialization of State.data failed \"\"\" # Parsing required",
"uuid * If invalid operation was called InternalError: * If",
"+ 2) * \"-\" + \"+\") ################################################################################ # # ################################################################################",
"json from collections import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from",
"If deserialization of State.data failed \"\"\" # Parsing required fields",
"\"\"\" return { \"uuid\" : artifact_id, \"alias\" : artifact_alias, \"name\"",
"the new data into the state storage. Args: namespace_prefix (str):",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"the transaction (default \"0\") cur (str): the current block id",
"} def validate_transaction(artifact_id, action): \"\"\" Performs soft sanity check in",
": artifact_name, \"content_type\" : artifact_type, \"checksum\" : artifact_checksum, \"label\" :",
"type: list of str Returns the encoding scheme used for",
"Returns the namespaces associating with the handler object. \"\"\" return",
"payload serialization\") # Soft sanity check and loading required data",
"artifact action (str): The command to be performed Raises: InvalidTransaction:",
"elif action == \"create\": artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,",
"is returned to be stored on the state storage. Raises:",
"family \"\"\" self._namespace_prefix = namespace_prefix @property def family_name(self): \"\"\" type:",
"the ledger, which includes the data from the transaction, is",
"data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address])",
"artifact_name, \"content_type\" : artifact_type, \"checksum\" : artifact_checksum, \"label\" : artifact_label,",
"\"\"\" return namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\" Logs",
"object. \"\"\" return \"artifact\" @property def family_versions(self): \"\"\" type: list",
"try: payload = json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"] artifact_alias = payload[\"alias\"]",
"namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\" Logs the message",
"for handling the Transaction Family : Artifact Attributes: namespace_prefix (str):",
"list of str Returns the family version of the handler",
"\"prev_block\" : prev, \"cur_block\" : cur, \"timestamp\" : timestamp, \"artifact_list\"",
"uri_list=[]): \"\"\" Constructs the payload to be stored in the",
"ID is required\") if not action: raise InvalidTransaction(\"Action is required\")",
"prefix associating with the transaction family artifact_id (str): The uuid",
"The namespace prefix of the transaction family \"\"\" def __init__(self,",
"length = len(msg) msg = [msg] LOGGER.debug(\"+\" + (length +",
"The message that is to be logged into the debug",
"artifact_alias (str): The alias of the artifact artifact_name (str): The",
"use this file except in compliance with the License. #",
"the state storage. Args: artifact_uuid (str): The uuid of the",
"ArtifactTransactionHandler object. Args: namespace_prefix (str): The namepsace prefix of the",
"state of the ledger, which includes the data from the",
"InternalError: * If deserialization of State.data failed \"\"\" # Parsing",
"artifact_id = payload[\"uuid\"] artifact_alias = payload[\"alias\"] artifact_name = payload[\"name\"] artifact_type",
"\" +\") LOGGER.debug(\"+\" + (length + 2) * \"-\" +",
"prev = payload[\"prev_block\"] cur = payload[\"cur_block\"] timestamp = payload[\"timestamp\"] artifact_list",
"Corporation # Copyright 2017 Wind River # Licensed under the",
"Transaction Family : Artifact Attributes: namespace_prefix (str): The namespace prefix",
"The alias of the artifact artifact_name (str): The name of",
"not in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid action: {}\".format(action))",
"InvalidTransaction: * If deserialization for payload from transaction failed *",
"associated with the artifact (default []) uri_list (list of dict):",
"msg = [msg] LOGGER.debug(\"+\" + (length + 2) * \"-\"",
"collections import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import",
"payload[\"artifact_list\"] uri_list = payload[\"uri_list\"] except ValueError: raise InvalidTransaction(\"Invalid payload serialization\")",
"the artifact artifact_type (str): The type of the artifact artifact_checksum",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"payload[\"action\"] prev = payload[\"prev_block\"] cur = payload[\"cur_block\"] timestamp = payload[\"timestamp\"]",
"to deserialize data.\") else: stored_artifact_id = stored_artifact = None if",
"(list of dict): The list of the artifact uuid associated",
"not action: raise InvalidTransaction(\"Action is required\") if action not in",
"in msg: LOGGER.debug(\"+ \" + line.center(length) + \" +\") LOGGER.debug(\"+\"",
"* If \"Add...\" were called on non-existing uuid * If",
"License. # You may obtain a copy of the License",
"= msg.count(\"\\n\") if n > 0: msg = msg.split(\"\\n\") length",
"for payload from transaction failed * If \"create\" was called",
"not a valid action. \"\"\" if not artifact_id: raise InvalidTransaction(\"Artifact",
"Returns the family version of the handler object. \"\"\" return",
"called on non-existing uuid * If invalid operation was called",
"\"name\" : artifact_name, \"content_type\" : artifact_type, \"checksum\" : artifact_checksum, \"label\"",
"artifact_openchain, \"prev_block\" : prev, \"cur_block\" : cur, \"timestamp\" : timestamp,",
"Args: artifact_id (str): The uuid of the artifact action (str):",
"the artifact already exists in the state storage; or, used",
"dict The dictionary pertaining all the param is created and",
"under the License is distributed on an \"AS IS\" BASIS,",
"License for the specific language governing permissions and # limitations",
"n = msg.count(\"\\n\") if n > 0: msg = msg.split(\"\\n\")",
"\"create\": artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain,",
"of the uri associated with the artifact (default []) Returns:",
"block id of the transaction timestamp (str): The UTC time",
"== \"create\": artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label,",
"hashlib import logging import json from collections import OrderedDict from",
"the state storage. \"\"\" return { \"uuid\" : artifact_id, \"alias\"",
"to recover the associated UUID if the artifact already exists",
"order to improve runtime by eliminating the obvious exception errors.",
"fields from transaction payload try: payload = json.loads(transaction.payload.decode()) artifact_id =",
"Args: namespace_prefix (str): The prefix associating with the transaction family",
"artifact_label = payload[\"label\"] artifact_openchain = payload[\"openchain\"] action = payload[\"action\"] prev",
"= create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur,",
"* If \"amend\" was called on non-existing uuid * If",
"+ line.center(length) + \" +\") LOGGER.debug(\"+\" + (length + 2)",
"OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from",
"data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses ################################################################################ #",
"artifact_type, \"checksum\" : artifact_checksum, \"label\" : artifact_label, \"openchain\" : artifact_openchain,",
"0: msg = msg.split(\"\\n\") length = max(len(line) for line in",
"called InternalError: * If deserialization of State.data failed \"\"\" #",
"artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]): \"\"\" Constructs the payload",
"FUNCTIONS # ################################################################################ def apply(self, transaction, context): \"\"\" Applys the",
"uuid and the namespace prefix. \"\"\" return namespace_prefix + \\",
"artifact_id (str): The uuid of the artifact action (str): The",
"Attributes: namespace_prefix (str): The namespace prefix of the transaction family",
"\"\"\" type: list of str Returns the encoding scheme used",
"valid action. \"\"\" if not artifact_id: raise InvalidTransaction(\"Artifact ID is",
"= payload[\"alias\"] artifact_name = payload[\"name\"] artifact_type = payload[\"content_type\"] artifact_checksum =",
"not passed in or the action is not a valid",
"n > 0: msg = msg.split(\"\\n\") length = max(len(line) for",
"in compliance with the License. # You may obtain a",
"payload from transaction onto the state storage. Args: transaction (Transaction):",
"# Soft sanity check and loading required data validate_transaction(artifact_id, action)",
"cur, timestamp, artifact_list, uri_list) # Adding the final payload to",
"software # distributed under the License is distributed on an",
"action (str): The command to be performed Raises: InvalidTransaction: If",
"encodings(self): \"\"\" type: list of str Returns the encoding scheme",
"action are not passed in or the action is not",
"is required\") if not action: raise InvalidTransaction(\"Action is required\") if",
"cur, \"timestamp\" : timestamp, \"artifact_list\" : artifact_list, \"uri_list\" : uri_list",
"timestamp, \"artifact_list\" : artifact_list, \"uri_list\" : uri_list } def validate_transaction(artifact_id,",
"The list of the artifact uuid associated with the artifact",
"state storage. Args: artifact_uuid (str): The uuid of the artifact",
"2) * \"-\" + \"+\") for line in msg: LOGGER.debug(\"+",
"If \"amend\" was called on non-existing uuid * If \"Add...\"",
"The transaction pertaining the payload context (State): The current state",
"of the handler object. \"\"\" return [\"1.0\"] @property def encodings(self):",
"The uuid of the artifact Returns: type: str The address-to-be,",
"of dict): The list of the uri associated with the",
"\"\"\" return [self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################ def apply(self,",
"\"\"\" Creates an artifact address which will be used to",
"artifact_list = payload[\"artifact_list\"] uri_list = payload[\"uri_list\"] except ValueError: raise InvalidTransaction(\"Invalid",
"was called on non-existing uuid * If \"Add...\" were called",
"from transaction payload try: payload = json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"]",
"def _display(msg): \"\"\" Logs the message to the debug logger.",
"Returns: type: State The new state of the ledger, which",
"transaction, context): \"\"\" Applys the payload from transaction onto the",
"associating with the handler object. \"\"\" return [self._namespace_prefix] ################################################################################ #",
"Hard sanity check before creating final payload for the state",
"checksum of the artifact artifact_label (str): The label of the",
"prefix of the transaction family \"\"\" def __init__(self, namespace_prefix): \"\"\"",
"self._namespace_prefix = namespace_prefix @property def family_name(self): \"\"\" type: str Returns",
"debug logger \"\"\" n = msg.count(\"\\n\") if n > 0:",
"from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler",
"data into the state storage. Args: namespace_prefix (str): The prefix",
"= payload[\"prev_block\"] cur = payload[\"cur_block\"] timestamp = payload[\"timestamp\"] artifact_list =",
"serialization\") # Soft sanity check and loading required data validate_transaction(artifact_id,",
"InvalidTransaction(\"Artifact ID is required\") if not action: raise InvalidTransaction(\"Action is",
"apply(self, transaction, context): \"\"\" Applys the payload from transaction onto",
"Constructs the payload to be stored in the state storage.",
"id of the transaction timestamp (str): The UTC time for",
"stored_artifact_id is None: raise InvalidTransaction( \"Invalid Action-requires an existing artifact.\"",
"all the param is created and returned to be stored",
"the data from the transaction, is returned to be stored",
"deserialization for payload from transaction failed * If \"create\" was",
"= payload[\"checksum\"] artifact_label = payload[\"label\"] artifact_openchain = payload[\"openchain\"] action =",
"= payload[\"openchain\"] action = payload[\"action\"] prev = payload[\"prev_block\"] cur =",
"import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import TransactionHandler",
"associating with the transaction family artifact_id (str): The uuid of",
"to be logged into the debug logger \"\"\" n =",
"prev, cur, timestamp, artifact_list, uri_list) elif action == \"AddArtifact\" or",
"sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__)",
"permissions and # limitations under the License. # ------------------------------------------------------------------------------ ################################################################################",
"transaction family \"\"\" def __init__(self, namespace_prefix): \"\"\" Constructs the ArtifactTransactionHandler",
"################################################################################ def apply(self, transaction, context): \"\"\" Applys the payload from",
"when the transaction was submitted artifact_list (list of dict): The",
"payload[\"uuid\"] artifact_alias = payload[\"alias\"] artifact_name = payload[\"name\"] artifact_type = payload[\"content_type\"]",
"artifact_id (str): The uuid of the artifact Returns: type: str",
"= context.get_state([data_address]) # Hard sanity check before creating final payload",
"for the handler object. \"\"\" return [\"csv-utf8\"] @property def namespaces(self):",
"create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp)",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"\"\"\" Constructs the payload to be stored in the state",
"(default \"0\") cur (str): the current block id of the",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"Class for handling the Transaction Family : Artifact Attributes: namespace_prefix",
"final payload for the state storage if len(state_entries) != 0:",
"or, used as a key to store the new data",
"namespaces associating with the handler object. \"\"\" return [self._namespace_prefix] ################################################################################",
"> 0: msg = msg.split(\"\\n\") length = max(len(line) for line",
"\"+\") for line in msg: LOGGER.debug(\"+ \" + line.center(length) +",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"action: raise InvalidTransaction(\"Action is required\") if action not in (\"AddArtifact\",",
"be used to recover the associated UUID if the artifact",
"to in writing, software # distributed under the License is",
"be stored on the state storage. \"\"\" return { \"uuid\"",
"non-existing uuid * If \"Add...\" were called on non-existing uuid",
"associates the uuid and the namespace prefix. \"\"\" return namespace_prefix",
"# See the License for the specific language governing permissions",
"before creating final payload for the state storage if len(state_entries)",
"(str): The UTC time for when the transaction was submitted",
"stored_artifact_id = stored_artifact[\"uuid\"] except ValueError: raise InternalError(\"Failed to deserialize data.\")",
"(str): The openchain of the artifact prev (str): The previous",
"language governing permissions and # limitations under the License. #",
"of the artifact artifact_label (str): The label of the artifact",
"or agreed to in writing, software # distributed under the",
"alias of the artifact artifact_name (str): The name of the",
"ledger Returns: type: State The new state of the ledger,",
"len(state_entries) != 0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"]",
"required by applicable law or agreed to in writing, software",
"transaction (default \"0\") cur (str): the current block id of",
"\"artifact_list\" : artifact_list, \"uri_list\" : uri_list } def validate_transaction(artifact_id, action):",
"LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\") for",
"License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES # ################################################################################",
"new data into the state storage. Args: namespace_prefix (str): The",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"or action == \"AddURI\": if stored_artifact_id is None: raise InvalidTransaction(",
"encoding scheme used for the data for the handler object.",
"artifact_list, uri_list) elif action == \"AddArtifact\" or action == \"AddURI\":",
"with the License. # You may obtain a copy of",
"and returned to be stored on the state storage. \"\"\"",
"uuid * If \"amend\" was called on non-existing uuid *",
"context): \"\"\" Applys the payload from transaction onto the state",
"required\") if action not in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"): raise",
"!= 0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"] except",
"the transaction family \"\"\" self._namespace_prefix = namespace_prefix @property def family_name(self):",
"artifact artifact_name (str): The name of the artifact artifact_type (str):",
"def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur,",
"if n > 0: msg = msg.split(\"\\n\") length = max(len(line)",
"def namespaces(self): \"\"\" type: list of str Returns the namespaces",
"payload[\"uri_list\"] except ValueError: raise InvalidTransaction(\"Invalid payload serialization\") # Soft sanity",
"namepsace prefix of the transaction family \"\"\" self._namespace_prefix = namespace_prefix",
"limitations under the License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES &",
"non-existing uuid * If invalid operation was called InternalError: *",
"payload[\"alias\"] artifact_name = payload[\"name\"] artifact_type = payload[\"content_type\"] artifact_checksum = payload[\"checksum\"]",
"compliance with the License. # You may obtain a copy",
"None if action == \"create\" and stored_artifact_id is not None:",
"agreed to in writing, software # distributed under the License",
"= payload[\"uuid\"] artifact_alias = payload[\"alias\"] artifact_name = payload[\"name\"] artifact_type =",
"UTC time for when the transaction was submitted artifact_list (list",
"return [self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################ def apply(self, transaction,",
"the payload from transaction onto the state storage. Args: transaction",
"InvalidTransaction(\"Invalid action: {}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates an artifact",
"str The address-to-be, which associates the uuid and the namespace",
"distributed under the License is distributed on an \"AS IS\"",
"check before creating final payload for the state storage if",
"debug logger. Args: msg (str): The message that is to",
"else: stored_artifact_id = stored_artifact = None if action == \"create\"",
"make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) # Hard sanity check before",
"(str): The checksum of the artifact artifact_label (str): The label",
"Raises: InvalidTransaction: * If deserialization for payload from transaction failed",
"def make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates an artifact address which will",
"Action-requires an existing artifact.\" ) artifact = create_artifact(artifact_id, artifact_alias, artifact_name,",
"of the transaction family \"\"\" def __init__(self, namespace_prefix): \"\"\" Constructs",
"on the state storage. Raises: InvalidTransaction: * If deserialization for",
"express or implied. # See the License for the specific",
"except in compliance with the License. # You may obtain",
"+\") LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\")",
"the debug logger \"\"\" n = msg.count(\"\\n\") if n >",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"deserialization of State.data failed \"\"\" # Parsing required fields from",
"artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) # Adding",
"not use this file except in compliance with the License.",
"stored on the state storage. Raises: InvalidTransaction: * If deserialization",
"action = payload[\"action\"] prev = payload[\"prev_block\"] cur = payload[\"cur_block\"] timestamp",
"stored_artifact[\"uuid\"] except ValueError: raise InternalError(\"Failed to deserialize data.\") else: stored_artifact_id",
"writing, software # distributed under the License is distributed on",
"in or the action is not a valid action. \"\"\"",
"msg = msg.split(\"\\n\") length = max(len(line) for line in msg)",
"= context.set_state({data_address:data}) return addresses ################################################################################ # HELPER FUNCTIONS # ################################################################################",
"raise InvalidTransaction(\"Invalid payload serialization\") # Soft sanity check and loading",
"onto the state storage. Args: transaction (Transaction): The transaction pertaining",
"you may not use this file except in compliance with",
"final payload to the state storage data = json.dumps(artifact).encode() addresses",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"uuid * If \"Add...\" were called on non-existing uuid *",
"* If \"create\" was called on non-unique uuid * If",
"action is not a valid action. \"\"\" if not artifact_id:",
"handler object. \"\"\" return [self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################",
"transaction family artifact_id (str): The uuid of the artifact Returns:",
"return [\"csv-utf8\"] @property def namespaces(self): \"\"\" type: list of str",
"@property def namespaces(self): \"\"\" type: list of str Returns the",
"artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) #",
"* If deserialization for payload from transaction failed * If",
"HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum,",
"The name of the artifact artifact_type (str): The type of",
"\"uri_list\" : uri_list } def validate_transaction(artifact_id, action): \"\"\" Performs soft",
"state storage data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses",
"storage; or, used as a key to store the new",
"elif action == \"AddArtifact\" or action == \"AddURI\": if stored_artifact_id",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) elif",
"under the License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES",
"namespaces(self): \"\"\" type: list of str Returns the namespaces associating",
"storage. Args: namespace_prefix (str): The prefix associating with the transaction",
"addresses = context.set_state({data_address:data}) return addresses ################################################################################ # HELPER FUNCTIONS #",
"param is created and returned to be stored on the",
"Wind River # Licensed under the Apache License, Version 2.0",
"artifact_list, \"uri_list\" : uri_list } def validate_transaction(artifact_id, action): \"\"\" Performs",
"len(msg) msg = [msg] LOGGER.debug(\"+\" + (length + 2) *",
"(str): The alias of the artifact artifact_name (str): The name",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"] except ValueError: raise",
"The checksum of the artifact artifact_label (str): The label of",
"storage. Args: artifact_uuid (str): The uuid of the artifact artifact_alias",
"River # Licensed under the Apache License, Version 2.0 (the",
"namespace_prefix @property def family_name(self): \"\"\" type: str Returns the family",
"object. \"\"\" return [self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################ def",
"transaction (Transaction): The transaction pertaining the payload context (State): The",
"the payload context (State): The current state of the ledger",
"except ValueError: raise InternalError(\"Failed to deserialize data.\") else: stored_artifact_id =",
"artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) # Adding the final",
"of the artifact artifact_name (str): The name of the artifact",
"label of the artifact artifact_openchain (str): The openchain of the",
"which will be used to recover the associated UUID if",
"(str): The namespace prefix of the transaction family \"\"\" def",
"json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"] except ValueError: raise InternalError(\"Failed to deserialize",
"already exists in the state storage; or, used as a",
"(str): The name of the artifact artifact_type (str): The type",
"an existing artifact.\" ) artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,",
"the transaction was submitted artifact_list (list of dict): The list",
"{}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates an artifact address which",
"of the artifact artifact_openchain (str): The openchain of the artifact",
"# HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler: \"\"\" Class for",
"family_name(self): \"\"\" type: str Returns the family name of the",
"was called InternalError: * If deserialization of State.data failed \"\"\"",
"required\") if not action: raise InvalidTransaction(\"Action is required\") if action",
"Family : Artifact Attributes: namespace_prefix (str): The namespace prefix of",
"of the handler object. \"\"\" return \"artifact\" @property def family_versions(self):",
"= json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"] artifact_alias = payload[\"alias\"] artifact_name =",
"artifact address which will be used to recover the associated",
"\"\"\" return \"artifact\" @property def family_versions(self): \"\"\" type: list of",
"address which will be used to recover the associated UUID",
"def encodings(self): \"\"\" type: list of str Returns the encoding",
"\"uuid\" : artifact_id, \"alias\" : artifact_alias, \"name\" : artifact_name, \"content_type\"",
": Artifact Attributes: namespace_prefix (str): The namespace prefix of the",
"be stored on the state storage. Raises: InvalidTransaction: * If",
"payload for the state storage if len(state_entries) != 0: try:",
"the state storage. Args: namespace_prefix (str): The prefix associating with",
"The prefix associating with the transaction family artifact_id (str): The",
"logging import json from collections import OrderedDict from sawtooth_sdk.processor.exceptions import",
"is not None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum,",
"invalid operation was called InternalError: * If deserialization of State.data",
"not None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label,",
"namespace_prefix (str): The prefix associating with the transaction family artifact_id",
"artifact_checksum, \"label\" : artifact_label, \"openchain\" : artifact_openchain, \"prev_block\" : prev,",
"ValueError: raise InvalidTransaction(\"Invalid payload serialization\") # Soft sanity check and",
"0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"] except ValueError:",
"\"Invalid Action-requires an existing artifact.\" ) artifact = create_artifact(artifact_id, artifact_alias,",
"transaction, is returned to be stored on the state storage.",
"[self._namespace_prefix] ################################################################################ # FUNCTIONS # ################################################################################ def apply(self, transaction, context):",
"Returns: type: dict The dictionary pertaining all the param is",
"(State): The current state of the ledger Returns: type: State",
"were called on non-existing uuid * If invalid operation was",
"artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]): \"\"\" Constructs",
"family artifact_id (str): The uuid of the artifact Returns: type:",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]): \"\"\"",
"################################################################################ # FUNCTIONS # ################################################################################ def apply(self, transaction, context): \"\"\"",
"import json from collections import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction",
"from collections import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions",
"__init__(self, namespace_prefix): \"\"\" Constructs the ArtifactTransactionHandler object. Args: namespace_prefix (str):",
"action == \"create\" and stored_artifact_id is not None: raise InvalidTransaction(\"Invalid",
"the License is distributed on an \"AS IS\" BASIS, #",
"the handler object. \"\"\" return [self._namespace_prefix] ################################################################################ # FUNCTIONS #",
"artifact artifact_openchain (str): The openchain of the artifact prev (str):",
"+ 2) * \"-\" + \"+\") for line in msg:",
"\"\"\" type: str Returns the family name of the handler",
"Soft sanity check and loading required data validate_transaction(artifact_id, action) data_address",
"data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) # Hard sanity",
"@property def family_versions(self): \"\"\" type: list of str Returns the",
"are not passed in or the action is not a",
"(\"AddArtifact\", \"create\", \"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid action: {}\".format(action)) def make_artifact_address(namespace_prefix,",
"be stored in the state storage. Args: artifact_uuid (str): The",
"name of the handler object. \"\"\" return \"artifact\" @property def",
"of the artifact action (str): The command to be performed",
"(str): The type of the artifact artifact_checksum (str): The checksum",
"return [\"1.0\"] @property def encodings(self): \"\"\" type: list of str",
": timestamp, \"artifact_list\" : artifact_list, \"uri_list\" : uri_list } def",
"of str Returns the encoding scheme used for the data",
"payload[\"checksum\"] artifact_label = payload[\"label\"] artifact_openchain = payload[\"openchain\"] action = payload[\"action\"]",
"timestamp = payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"] uri_list = payload[\"uri_list\"] except",
"object. \"\"\" return [\"csv-utf8\"] @property def namespaces(self): \"\"\" type: list",
"with the handler object. \"\"\" return [self._namespace_prefix] ################################################################################ # FUNCTIONS",
"(str): The uuid of the artifact artifact_alias (str): The alias",
"soft sanity check in order to improve runtime by eliminating",
"store the new data into the state storage. Args: namespace_prefix",
"uuid of the artifact Returns: type: str The address-to-be, which",
"the obvious exception errors. Args: artifact_id (str): The uuid of",
"\"label\" : artifact_label, \"openchain\" : artifact_openchain, \"prev_block\" : prev, \"cur_block\"",
"# Hard sanity check before creating final payload for the",
"block id of the transaction (default \"0\") cur (str): the",
"artifact (default []) uri_list (list of dict): The list of",
"artifact_name (str): The name of the artifact artifact_type (str): The",
"artifact artifact_type (str): The type of the artifact artifact_checksum (str):",
"Logs the message to the debug logger. Args: msg (str):",
"2016 Intel Corporation # Copyright 2017 Wind River # Licensed",
"of the artifact prev (str): The previous block id of",
"dictionary pertaining all the param is created and returned to",
"and # limitations under the License. # ------------------------------------------------------------------------------ ################################################################################ #",
"(str): The label of the artifact artifact_openchain (str): The openchain",
": artifact_id, \"alias\" : artifact_alias, \"name\" : artifact_name, \"content_type\" :",
"\"alias\" : artifact_alias, \"name\" : artifact_name, \"content_type\" : artifact_type, \"checksum\"",
"artifact already exists in the state storage; or, used as",
"Args: transaction (Transaction): The transaction pertaining the payload context (State):",
"into the state storage. Args: namespace_prefix (str): The prefix associating",
"logger. Args: msg (str): The message that is to be",
"law or agreed to in writing, software # distributed under",
"of str Returns the family version of the handler object.",
": artifact_alias, \"name\" : artifact_name, \"content_type\" : artifact_type, \"checksum\" :",
"previous block id of the transaction (default \"0\") cur (str):",
"# Adding the final payload to the state storage data",
"json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"] artifact_alias = payload[\"alias\"] artifact_name = payload[\"name\"]",
"Performs soft sanity check in order to improve runtime by",
"The address-to-be, which associates the uuid and the namespace prefix.",
"return addresses ################################################################################ # HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id,",
"(Transaction): The transaction pertaining the payload context (State): The current",
"artifact_checksum = payload[\"checksum\"] artifact_label = payload[\"label\"] artifact_openchain = payload[\"openchain\"] action",
"@property def encodings(self): \"\"\" type: list of str Returns the",
"cur, timestamp) elif action == \"amend\" and stored_artifact_id is not",
"* If invalid operation was called InternalError: * If deserialization",
"= payload[\"label\"] artifact_openchain = payload[\"openchain\"] action = payload[\"action\"] prev =",
"[]) uri_list (list of dict): The list of the uri",
"validate_transaction(artifact_id, action): \"\"\" Performs soft sanity check in order to",
"artifact Returns: type: str The address-to-be, which associates the uuid",
"the artifact artifact_label (str): The label of the artifact artifact_openchain",
"of the artifact uuid associated with the artifact (default [])",
"prefix of the transaction family \"\"\" self._namespace_prefix = namespace_prefix @property",
"== \"AddArtifact\" or action == \"AddURI\": if stored_artifact_id is None:",
"artifact_openchain (str): The openchain of the artifact prev (str): The",
"storage data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses ################################################################################",
"address-to-be, which associates the uuid and the namespace prefix. \"\"\"",
"If the uuid or the action are not passed in",
"handler object. \"\"\" return [\"csv-utf8\"] @property def namespaces(self): \"\"\" type:",
"is created and returned to be stored on the state",
"from sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER",
"= stored_artifact[\"uuid\"] except ValueError: raise InternalError(\"Failed to deserialize data.\") else:",
"= json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"] except ValueError: raise InternalError(\"Failed to",
"may obtain a copy of the License at # #",
"= payload[\"name\"] artifact_type = payload[\"content_type\"] artifact_checksum = payload[\"checksum\"] artifact_label =",
"{ \"uuid\" : artifact_id, \"alias\" : artifact_alias, \"name\" : artifact_name,",
"= namespace_prefix @property def family_name(self): \"\"\" type: str Returns the",
"was submitted artifact_list (list of dict): The list of the",
"already exists.\") elif action == \"create\": artifact = create_artifact(artifact_id, artifact_alias,",
"the message to the debug logger. Args: msg (str): The",
"Adding the final payload to the state storage data =",
"artifact_list=[], uri_list=[]): \"\"\" Constructs the payload to be stored in",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"handler object. \"\"\" return [\"1.0\"] @property def encodings(self): \"\"\" type:",
"context (State): The current state of the ledger Returns: type:",
"uri_list } def validate_transaction(artifact_id, action): \"\"\" Performs soft sanity check",
"Copyright 2016 Intel Corporation # Copyright 2017 Wind River #",
"may not use this file except in compliance with the",
"= [msg] LOGGER.debug(\"+\" + (length + 2) * \"-\" +",
"payload[\"cur_block\"] timestamp = payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"] uri_list = payload[\"uri_list\"]",
"artifact_type (str): The type of the artifact artifact_checksum (str): The",
"state storage; or, used as a key to store the",
"# HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,",
"exists.\") elif action == \"create\": artifact = create_artifact(artifact_id, artifact_alias, artifact_name,",
"the uri associated with the artifact (default []) Returns: type:",
"The uuid of the artifact action (str): The command to",
"action == \"AddURI\": if stored_artifact_id is None: raise InvalidTransaction( \"Invalid",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"governing permissions and # limitations under the License. # ------------------------------------------------------------------------------",
"OBJ # ################################################################################ class ArtifactTransactionHandler: \"\"\" Class for handling the",
"artifact_name = payload[\"name\"] artifact_type = payload[\"content_type\"] artifact_checksum = payload[\"checksum\"] artifact_label",
"be logged into the debug logger \"\"\" n = msg.count(\"\\n\")",
"str Returns the encoding scheme used for the data for",
"failed * If \"create\" was called on non-unique uuid *",
"this file except in compliance with the License. # You",
"\"amend\" was called on non-existing uuid * If \"Add...\" were",
"sanity check and loading required data validate_transaction(artifact_id, action) data_address =",
"command to be performed Raises: InvalidTransaction: If the uuid or",
"timestamp) elif action == \"amend\" and stored_artifact_id is not None:",
"Creates an artifact address which will be used to recover",
"def __init__(self, namespace_prefix): \"\"\" Constructs the ArtifactTransactionHandler object. Args: namespace_prefix",
"\"checksum\" : artifact_checksum, \"label\" : artifact_label, \"openchain\" : artifact_openchain, \"prev_block\"",
"into the debug logger \"\"\" n = msg.count(\"\\n\") if n",
"== \"amend\" and stored_artifact_id is not None: artifact = create_artifact(artifact_id,",
"storage if len(state_entries) != 0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id",
"\"\"\" type: list of str Returns the namespaces associating with",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"return namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\" Logs the",
"(str): The message that is to be logged into the",
"Returns the encoding scheme used for the data for the",
"\"\"\" return [\"1.0\"] @property def encodings(self): \"\"\" type: list of",
"If deserialization for payload from transaction failed * If \"create\"",
"raise InvalidTransaction(\"Artifact ID is required\") if not action: raise InvalidTransaction(\"Action",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"the state storage. Raises: InvalidTransaction: * If deserialization for payload",
"prev, cur, timestamp) elif action == \"amend\" and stored_artifact_id is",
"the debug logger. Args: msg (str): The message that is",
"uri_list (list of dict): The list of the uri associated",
"[\"csv-utf8\"] @property def namespaces(self): \"\"\" type: list of str Returns",
"namespace prefix of the transaction family \"\"\" def __init__(self, namespace_prefix):",
"raise InvalidTransaction( \"Invalid Action-requires an existing artifact.\" ) artifact =",
"type: str Returns the family name of the handler object.",
"if stored_artifact_id is None: raise InvalidTransaction( \"Invalid Action-requires an existing",
"HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler: \"\"\" Class for handling",
"################################################################################ import hashlib import logging import json from collections import",
"runtime by eliminating the obvious exception errors. Args: artifact_id (str):",
"_display(msg): \"\"\" Logs the message to the debug logger. Args:",
"\"create\" was called on non-unique uuid * If \"amend\" was",
"the state storage; or, used as a key to store",
"InternalError(\"Failed to deserialize data.\") else: stored_artifact_id = stored_artifact = None",
"make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates an artifact address which will be",
"list of the uri associated with the artifact (default [])",
"exists in the state storage; or, used as a key",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"artifact_list (list of dict): The list of the artifact uuid",
"the artifact artifact_alias (str): The alias of the artifact artifact_name",
"The previous block id of the transaction (default \"0\") cur",
"the namespaces associating with the handler object. \"\"\" return [self._namespace_prefix]",
"sanity check before creating final payload for the state storage",
"for the data for the handler object. \"\"\" return [\"csv-utf8\"]",
"the action are not passed in or the action is",
"\" + line.center(length) + \" +\") LOGGER.debug(\"+\" + (length +",
"import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER OBJ #",
"state storage. \"\"\" return { \"uuid\" : artifact_id, \"alias\" :",
"\"0\") cur (str): the current block id of the transaction",
"the artifact artifact_openchain (str): The openchain of the artifact prev",
"\"\"\" Class for handling the Transaction Family : Artifact Attributes:",
"prev, \"cur_block\" : cur, \"timestamp\" : timestamp, \"artifact_list\" : artifact_list,",
"type: State The new state of the ledger, which includes",
"state storage. Args: namespace_prefix (str): The prefix associating with the",
"associated with the artifact (default []) Returns: type: dict The",
"payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"] uri_list = payload[\"uri_list\"] except ValueError: raise",
"json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses ################################################################################ # HELPER FUNCTIONS",
"action not in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid action:",
"LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\") ################################################################################",
"\"amend\"): raise InvalidTransaction(\"Invalid action: {}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates",
"is None: raise InvalidTransaction( \"Invalid Action-requires an existing artifact.\" )",
"line in msg: LOGGER.debug(\"+ \" + line.center(length) + \" +\")",
"used as a key to store the new data into",
"(str): The uuid of the artifact action (str): The command",
"\"\"\" return [\"csv-utf8\"] @property def namespaces(self): \"\"\" type: list of",
"time for when the transaction was submitted artifact_list (list of",
"creating final payload for the state storage if len(state_entries) !=",
"the final payload to the state storage data = json.dumps(artifact).encode()",
"Intel Corporation # Copyright 2017 Wind River # Licensed under",
"If \"create\" was called on non-unique uuid * If \"amend\"",
"non-unique uuid * If \"amend\" was called on non-existing uuid",
"of the artifact Returns: type: str The address-to-be, which associates",
"\"\"\" self._namespace_prefix = namespace_prefix @property def family_name(self): \"\"\" type: str",
"def apply(self, transaction, context): \"\"\" Applys the payload from transaction",
"prev (str): The previous block id of the transaction (default",
"check in order to improve runtime by eliminating the obvious",
"of the transaction (default \"0\") cur (str): the current block",
"UUID if the artifact already exists in the state storage;",
"prev, cur, timestamp, artifact_list=[], uri_list=[]): \"\"\" Constructs the payload to",
"action. \"\"\" if not artifact_id: raise InvalidTransaction(\"Artifact ID is required\")",
"\"content_type\" : artifact_type, \"checksum\" : artifact_checksum, \"label\" : artifact_label, \"openchain\"",
"of the ledger Returns: type: State The new state of",
"payload[\"content_type\"] artifact_checksum = payload[\"checksum\"] artifact_label = payload[\"label\"] artifact_openchain = payload[\"openchain\"]",
"the state storage if len(state_entries) != 0: try: stored_artifact =",
"payload to the state storage data = json.dumps(artifact).encode() addresses =",
"[\"1.0\"] @property def encodings(self): \"\"\" type: list of str Returns",
"FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label,",
"the transaction, is returned to be stored on the state",
"artifact_openchain = payload[\"openchain\"] action = payload[\"action\"] prev = payload[\"prev_block\"] cur",
"################################################################################ # HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name,",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"used for the data for the handler object. \"\"\" return",
"is to be logged into the debug logger \"\"\" n",
"str Returns the family name of the handler object. \"\"\"",
"storage. \"\"\" return { \"uuid\" : artifact_id, \"alias\" : artifact_alias,",
"type: str The address-to-be, which associates the uuid and the",
"ValueError: raise InternalError(\"Failed to deserialize data.\") else: stored_artifact_id = stored_artifact",
"includes the data from the transaction, is returned to be",
"transaction failed * If \"create\" was called on non-unique uuid",
"The current state of the ledger Returns: type: State The",
"2017 Wind River # Licensed under the Apache License, Version",
"namespace_prefix (str): The namepsace prefix of the transaction family \"\"\"",
"= payload[\"uri_list\"] except ValueError: raise InvalidTransaction(\"Invalid payload serialization\") # Soft",
"timestamp, artifact_list, uri_list) # Adding the final payload to the",
"(str): The namepsace prefix of the transaction family \"\"\" self._namespace_prefix",
"InvalidTransaction(\"Invalid payload serialization\") # Soft sanity check and loading required",
"name of the artifact artifact_type (str): The type of the",
"The openchain of the artifact prev (str): The previous block",
"artifact_alias, \"name\" : artifact_name, \"content_type\" : artifact_type, \"checksum\" : artifact_checksum,",
"if action not in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid",
"# ################################################################################ def apply(self, transaction, context): \"\"\" Applys the payload",
"or implied. # See the License for the specific language",
"artifact_id) state_entries = context.get_state([data_address]) # Hard sanity check before creating",
"a key to store the new data into the state",
"with the transaction family artifact_id (str): The uuid of the",
"\"\"\" Constructs the ArtifactTransactionHandler object. Args: namespace_prefix (str): The namepsace",
"type: list of str Returns the namespaces associating with the",
"to store the new data into the state storage. Args:",
"loading required data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries",
"state of the ledger Returns: type: State The new state",
": prev, \"cur_block\" : cur, \"timestamp\" : timestamp, \"artifact_list\" :",
"\"cur_block\" : cur, \"timestamp\" : timestamp, \"artifact_list\" : artifact_list, \"uri_list\"",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"# ################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain,",
"(length + 2) * \"-\" + \"+\") ################################################################################ # #",
"artifact artifact_label (str): The label of the artifact artifact_openchain (str):",
"= make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) # Hard sanity check",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"+ \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\" Logs the message to",
"is not a valid action. \"\"\" if not artifact_id: raise",
"addresses ################################################################################ # HELPER FUNCTIONS # ################################################################################ def create_artifact(artifact_id, artifact_alias,",
"\"amend\" and stored_artifact_id is not None: artifact = create_artifact(artifact_id, artifact_alias,",
"required fields from transaction payload try: payload = json.loads(transaction.payload.decode()) artifact_id",
"artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp) elif action ==",
"LOGGER.debug(\"+ \" + line.center(length) + \" +\") LOGGER.debug(\"+\" + (length",
"payload = json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"] artifact_alias = payload[\"alias\"] artifact_name",
"message to the debug logger. Args: msg (str): The message",
"uuid of the artifact artifact_alias (str): The alias of the",
"msg: LOGGER.debug(\"+ \" + line.center(length) + \" +\") LOGGER.debug(\"+\" +",
"create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp,",
"@property def family_name(self): \"\"\" type: str Returns the family name",
"(the \"License\"); # you may not use this file except",
"\"artifact\" @property def family_versions(self): \"\"\" type: list of str Returns",
"state_entries = context.get_state([data_address]) # Hard sanity check before creating final",
"Args: namespace_prefix (str): The namepsace prefix of the transaction family",
"# you may not use this file except in compliance",
"if len(state_entries) != 0: try: stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id =",
"The new state of the ledger, which includes the data",
"deserialize data.\") else: stored_artifact_id = stored_artifact = None if action",
"artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp) elif action == \"amend\"",
"uri_list) # Adding the final payload to the state storage",
"dict): The list of the artifact uuid associated with the",
"object. \"\"\" return [\"1.0\"] @property def encodings(self): \"\"\" type: list",
"called on non-unique uuid * If \"amend\" was called on",
"was called on non-unique uuid * If \"amend\" was called",
"from the transaction, is returned to be stored on the",
"\"AddArtifact\" or action == \"AddURI\": if stored_artifact_id is None: raise",
"family name of the handler object. \"\"\" return \"artifact\" @property",
"artifact artifact_alias (str): The alias of the artifact artifact_name (str):",
"for line in msg: LOGGER.debug(\"+ \" + line.center(length) + \"",
"pertaining all the param is created and returned to be",
"and stored_artifact_id is not None: raise InvalidTransaction(\"Invalid Action-artifact already exists.\")",
"the state storage data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return",
"action == \"AddArtifact\" or action == \"AddURI\": if stored_artifact_id is",
"Args: msg (str): The message that is to be logged",
"the encoding scheme used for the data for the handler",
"check and loading required data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix,",
"def family_name(self): \"\"\" type: str Returns the family name of",
"# Copyright 2017 Wind River # Licensed under the Apache",
"is not None: raise InvalidTransaction(\"Invalid Action-artifact already exists.\") elif action",
"\"\"\" Performs soft sanity check in order to improve runtime",
"the artifact (default []) Returns: type: dict The dictionary pertaining",
"################################################################################ # LIBRARIES & DEPENDENCIES # ################################################################################ import hashlib import",
"# # Unless required by applicable law or agreed to",
"not artifact_id: raise InvalidTransaction(\"Artifact ID is required\") if not action:",
"data.\") else: stored_artifact_id = stored_artifact = None if action ==",
"+ (length + 2) * \"-\" + \"+\") for line",
"(str): The uuid of the artifact Returns: type: str The",
"of State.data failed \"\"\" # Parsing required fields from transaction",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"The uuid of the artifact artifact_alias (str): The alias of",
"(length + 2) * \"-\" + \"+\") for line in",
"= msg.split(\"\\n\") length = max(len(line) for line in msg) else:",
"Version 2.0 (the \"License\"); # you may not use this",
"\"\"\" n = msg.count(\"\\n\") if n > 0: msg =",
"State The new state of the ledger, which includes the",
"\"create\", \"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid action: {}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id):",
"the artifact uuid associated with the artifact (default []) uri_list",
"for when the transaction was submitted artifact_list (list of dict):",
"for line in msg) else: length = len(msg) msg =",
"= stored_artifact = None if action == \"create\" and stored_artifact_id",
"+ \"+\") for line in msg: LOGGER.debug(\"+ \" + line.center(length)",
"prev, cur, timestamp, artifact_list, uri_list) # Adding the final payload",
"family_versions(self): \"\"\" type: list of str Returns the family version",
"context.get_state([data_address]) # Hard sanity check before creating final payload for",
"artifact_id, \"alias\" : artifact_alias, \"name\" : artifact_name, \"content_type\" : artifact_type,",
"Raises: InvalidTransaction: If the uuid or the action are not",
"line.center(length) + \" +\") LOGGER.debug(\"+\" + (length + 2) *",
"logger \"\"\" n = msg.count(\"\\n\") if n > 0: msg",
"# limitations under the License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES",
"type: list of str Returns the family version of the",
"implied. # See the License for the specific language governing",
"artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) elif action ==",
"to the debug logger. Args: msg (str): The message that",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"of str Returns the namespaces associating with the handler object.",
"# Parsing required fields from transaction payload try: payload =",
"payload try: payload = json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"] artifact_alias =",
"(str): The command to be performed Raises: InvalidTransaction: If the",
"the ArtifactTransactionHandler object. Args: namespace_prefix (str): The namepsace prefix of",
"\"-\" + \"+\") for line in msg: LOGGER.debug(\"+ \" +",
"called on non-existing uuid * If \"Add...\" were called on",
"msg (str): The message that is to be logged into",
"msg.split(\"\\n\") length = max(len(line) for line in msg) else: length",
"artifact prev (str): The previous block id of the transaction",
"The label of the artifact artifact_openchain (str): The openchain of",
"InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER",
"= logging.getLogger(__name__) ################################################################################ # HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler:",
"artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) elif action == \"AddArtifact\"",
"length = max(len(line) for line in msg) else: length =",
"line in msg) else: length = len(msg) msg = [msg]",
"stored_artifact = json.loads(state_entries[0].data.decode()) stored_artifact_id = stored_artifact[\"uuid\"] except ValueError: raise InternalError(\"Failed",
"dict): The list of the uri associated with the artifact",
"\"AddURI\": if stored_artifact_id is None: raise InvalidTransaction( \"Invalid Action-requires an",
"artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) elif action",
"eliminating the obvious exception errors. Args: artifact_id (str): The uuid",
"by applicable law or agreed to in writing, software #",
"that is to be logged into the debug logger \"\"\"",
"failed \"\"\" # Parsing required fields from transaction payload try:",
"a valid action. \"\"\" if not artifact_id: raise InvalidTransaction(\"Artifact ID",
"object. Args: namespace_prefix (str): The namepsace prefix of the transaction",
"msg.count(\"\\n\") if n > 0: msg = msg.split(\"\\n\") length =",
"msg) else: length = len(msg) msg = [msg] LOGGER.debug(\"+\" +",
"str Returns the family version of the handler object. \"\"\"",
"artifact_type = payload[\"content_type\"] artifact_checksum = payload[\"checksum\"] artifact_label = payload[\"label\"] artifact_openchain",
"str Returns the namespaces associating with the handler object. \"\"\"",
"artifact_label (str): The label of the artifact artifact_openchain (str): The",
"elif action == \"amend\" and stored_artifact_id is not None: artifact",
"and the namespace prefix. \"\"\" return namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64]",
"the action is not a valid action. \"\"\" if not",
"the artifact artifact_name (str): The name of the artifact artifact_type",
"in the state storage. Args: artifact_uuid (str): The uuid of",
"the associated UUID if the artifact already exists in the",
"the current block id of the transaction timestamp (str): The",
"data for the handler object. \"\"\" return [\"csv-utf8\"] @property def",
"the handler object. \"\"\" return [\"csv-utf8\"] @property def namespaces(self): \"\"\"",
"sanity check in order to improve runtime by eliminating the",
"InvalidTransaction: If the uuid or the action are not passed",
"is required\") if action not in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"):",
"Action-artifact already exists.\") elif action == \"create\": artifact = create_artifact(artifact_id,",
"\"Add...\" were called on non-existing uuid * If invalid operation",
"artifact artifact_checksum (str): The checksum of the artifact artifact_label (str):",
"the Transaction Family : Artifact Attributes: namespace_prefix (str): The namespace",
"The list of the uri associated with the artifact (default",
"timestamp, artifact_list=[], uri_list=[]): \"\"\" Constructs the payload to be stored",
"payload[\"name\"] artifact_type = payload[\"content_type\"] artifact_checksum = payload[\"checksum\"] artifact_label = payload[\"label\"]",
"new state of the ledger, which includes the data from",
"(str): The previous block id of the transaction (default \"0\")",
"artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list)",
"exception errors. Args: artifact_id (str): The uuid of the artifact",
"the transaction family \"\"\" def __init__(self, namespace_prefix): \"\"\" Constructs the",
"\\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\" Logs the message to the",
"raise InvalidTransaction(\"Invalid action: {}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates an",
"Returns the family name of the handler object. \"\"\" return",
"required data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries =",
"created and returned to be stored on the state storage.",
"payload[\"label\"] artifact_openchain = payload[\"openchain\"] action = payload[\"action\"] prev = payload[\"prev_block\"]",
"artifact_label, \"openchain\" : artifact_openchain, \"prev_block\" : prev, \"cur_block\" : cur,",
"will be used to recover the associated UUID if the",
"= max(len(line) for line in msg) else: length = len(msg)",
"return \"artifact\" @property def family_versions(self): \"\"\" type: list of str",
"list of the artifact uuid associated with the artifact (default",
"used to recover the associated UUID if the artifact already",
"openchain of the artifact prev (str): The previous block id",
"class ArtifactTransactionHandler: \"\"\" Class for handling the Transaction Family :",
"else: length = len(msg) msg = [msg] LOGGER.debug(\"+\" + (length",
"raise InvalidTransaction(\"Invalid Action-artifact already exists.\") elif action == \"create\": artifact",
"artifact.\" ) artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label,",
"transaction was submitted artifact_list (list of dict): The list of",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"to be performed Raises: InvalidTransaction: If the uuid or the",
"the namespace prefix. \"\"\" return namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def",
"Unless required by applicable law or agreed to in writing,",
"which associates the uuid and the namespace prefix. \"\"\" return",
"Args: artifact_uuid (str): The uuid of the artifact artifact_alias (str):",
"Parsing required fields from transaction payload try: payload = json.loads(transaction.payload.decode())",
"id of the transaction (default \"0\") cur (str): the current",
": artifact_type, \"checksum\" : artifact_checksum, \"label\" : artifact_label, \"openchain\" :",
"as a key to store the new data into the",
"validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) #",
"existing artifact.\" ) artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum,",
"uuid or the action are not passed in or the",
"import logging import json from collections import OrderedDict from sawtooth_sdk.processor.exceptions",
"current state of the ledger Returns: type: State The new",
"action == \"amend\" and stored_artifact_id is not None: artifact =",
"family version of the handler object. \"\"\" return [\"1.0\"] @property",
"the specific language governing permissions and # limitations under the",
"(str): the current block id of the transaction timestamp (str):",
"the artifact action (str): The command to be performed Raises:",
"stored_artifact_id is not None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,",
"action: {}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id): \"\"\" Creates an artifact address",
"the family name of the handler object. \"\"\" return \"artifact\"",
"Copyright 2017 Wind River # Licensed under the Apache License,",
"applicable law or agreed to in writing, software # distributed",
"InvalidTransaction( \"Invalid Action-requires an existing artifact.\" ) artifact = create_artifact(artifact_id,",
"to be stored in the state storage. Args: artifact_uuid (str):",
"list of str Returns the encoding scheme used for the",
"Constructs the ArtifactTransactionHandler object. Args: namespace_prefix (str): The namepsace prefix",
"import InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################",
"= None if action == \"create\" and stored_artifact_id is not",
"stored on the state storage. \"\"\" return { \"uuid\" :",
"context.set_state({data_address:data}) return addresses ################################################################################ # HELPER FUNCTIONS # ################################################################################ def",
"which includes the data from the transaction, is returned to",
": cur, \"timestamp\" : timestamp, \"artifact_list\" : artifact_list, \"uri_list\" :",
"on non-existing uuid * If \"Add...\" were called on non-existing",
"in writing, software # distributed under the License is distributed",
": uri_list } def validate_transaction(artifact_id, action): \"\"\" Performs soft sanity",
"cur, timestamp, artifact_list=[], uri_list=[]): \"\"\" Constructs the payload to be",
"artifact_alias = payload[\"alias\"] artifact_name = payload[\"name\"] artifact_type = payload[\"content_type\"] artifact_checksum",
"\"\"\" # Parsing required fields from transaction payload try: payload",
") artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain,",
"handler object. \"\"\" return \"artifact\" @property def family_versions(self): \"\"\" type:",
"and stored_artifact_id is not None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name,",
"uri_list = payload[\"uri_list\"] except ValueError: raise InvalidTransaction(\"Invalid payload serialization\") #",
"payload from transaction failed * If \"create\" was called on",
"The dictionary pertaining all the param is created and returned",
"InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ #",
"\"openchain\" : artifact_openchain, \"prev_block\" : prev, \"cur_block\" : cur, \"timestamp\"",
"not None: raise InvalidTransaction(\"Invalid Action-artifact already exists.\") elif action ==",
"transaction timestamp (str): The UTC time for when the transaction",
"recover the associated UUID if the artifact already exists in",
"the handler object. \"\"\" return \"artifact\" @property def family_versions(self): \"\"\"",
"################################################################################ def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev,",
"on non-unique uuid * If \"amend\" was called on non-existing",
"to the state storage data = json.dumps(artifact).encode() addresses = context.set_state({data_address:data})",
"artifact (default []) Returns: type: dict The dictionary pertaining all",
"or the action is not a valid action. \"\"\" if",
"raise InvalidTransaction(\"Action is required\") if action not in (\"AddArtifact\", \"create\",",
"artifact_label, artifact_openchain, prev, cur, timestamp) elif action == \"amend\" and",
"version of the handler object. \"\"\" return [\"1.0\"] @property def",
"artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]):",
"artifact_uuid (str): The uuid of the artifact artifact_alias (str): The",
"raise InternalError(\"Failed to deserialize data.\") else: stored_artifact_id = stored_artifact =",
"InvalidTransaction(\"Action is required\") if action not in (\"AddArtifact\", \"create\", \"AddURI\",",
"artifact_list, uri_list) # Adding the final payload to the state",
"storage. Raises: InvalidTransaction: * If deserialization for payload from transaction",
"max(len(line) for line in msg) else: length = len(msg) msg",
"The type of the artifact artifact_checksum (str): The checksum of",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"by eliminating the obvious exception errors. Args: artifact_id (str): The",
"import OrderedDict from sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError",
"sawtooth_sdk.processor.handler import TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER OBJ",
"License, Version 2.0 (the \"License\"); # you may not use",
"type of the artifact artifact_checksum (str): The checksum of the",
": artifact_checksum, \"label\" : artifact_label, \"openchain\" : artifact_openchain, \"prev_block\" :",
": artifact_openchain, \"prev_block\" : prev, \"cur_block\" : cur, \"timestamp\" :",
"# You may obtain a copy of the License at",
"= payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"] uri_list = payload[\"uri_list\"] except ValueError:",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"the artifact prev (str): The previous block id of the",
"timestamp, artifact_list, uri_list) elif action == \"AddArtifact\" or action ==",
"None: artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain,",
"ledger, which includes the data from the transaction, is returned",
"errors. Args: artifact_id (str): The uuid of the artifact action",
": artifact_list, \"uri_list\" : uri_list } def validate_transaction(artifact_id, action): \"\"\"",
"on the state storage. \"\"\" return { \"uuid\" : artifact_id,",
"state storage if len(state_entries) != 0: try: stored_artifact = json.loads(state_entries[0].data.decode())",
"returned to be stored on the state storage. Raises: InvalidTransaction:",
"the payload to be stored in the state storage. Args:",
"with the artifact (default []) uri_list (list of dict): The",
"# FUNCTIONS # ################################################################################ def apply(self, transaction, context): \"\"\" Applys",
"action): \"\"\" Performs soft sanity check in order to improve",
"or the action are not passed in or the action",
"stored_artifact_id = stored_artifact = None if action == \"create\" and",
"on non-existing uuid * If invalid operation was called InternalError:",
"transaction pertaining the payload context (State): The current state of",
"== \"create\" and stored_artifact_id is not None: raise InvalidTransaction(\"Invalid Action-artifact",
"the License for the specific language governing permissions and #",
"[msg] LOGGER.debug(\"+\" + (length + 2) * \"-\" + \"+\")",
"the transaction family artifact_id (str): The uuid of the artifact",
"namespace prefix. \"\"\" return namespace_prefix + \\ hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg):",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"Artifact Attributes: namespace_prefix (str): The namespace prefix of the transaction",
"import hashlib import logging import json from collections import OrderedDict",
"Applys the payload from transaction onto the state storage. Args:",
"in the state storage; or, used as a key to",
"either express or implied. # See the License for the",
"for the state storage if len(state_entries) != 0: try: stored_artifact",
"stored_artifact_id is not None: raise InvalidTransaction(\"Invalid Action-artifact already exists.\") elif",
"if not action: raise InvalidTransaction(\"Action is required\") if action not",
"artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp) elif action",
"artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[], uri_list=[]): \"\"\" Constructs the",
"uuid of the artifact action (str): The command to be",
"(str): The prefix associating with the transaction family artifact_id (str):",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"passed in or the action is not a valid action.",
"TransactionHandler LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER OBJ # ################################################################################",
"Returns: type: str The address-to-be, which associates the uuid and",
"artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp) elif",
"the artifact artifact_checksum (str): The checksum of the artifact artifact_label",
"[]) Returns: type: dict The dictionary pertaining all the param",
"transaction family \"\"\" self._namespace_prefix = namespace_prefix @property def family_name(self): \"\"\"",
"of dict): The list of the artifact uuid associated with",
"logged into the debug logger \"\"\" n = msg.count(\"\\n\") if",
"from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import TransactionHandler LOGGER =",
"If invalid operation was called InternalError: * If deserialization of",
"None: raise InvalidTransaction(\"Invalid Action-artifact already exists.\") elif action == \"create\":",
"################################################################################ class ArtifactTransactionHandler: \"\"\" Class for handling the Transaction Family",
"artifact_id): \"\"\" Creates an artifact address which will be used",
"key to store the new data into the state storage.",
"of the transaction family \"\"\" self._namespace_prefix = namespace_prefix @property def",
"\"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid action: {}\".format(action)) def make_artifact_address(namespace_prefix, artifact_id): \"\"\"",
"the uuid and the namespace prefix. \"\"\" return namespace_prefix +",
"state storage. Raises: InvalidTransaction: * If deserialization for payload from",
"cur = payload[\"cur_block\"] timestamp = payload[\"timestamp\"] artifact_list = payload[\"artifact_list\"] uri_list",
"the License. # ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES #",
"payload[\"openchain\"] action = payload[\"action\"] prev = payload[\"prev_block\"] cur = payload[\"cur_block\"]",
"submitted artifact_list (list of dict): The list of the artifact",
"logging.getLogger(__name__) ################################################################################ # HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler: \"\"\"",
"\"\"\" def __init__(self, namespace_prefix): \"\"\" Constructs the ArtifactTransactionHandler object. Args:",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"LOGGER = logging.getLogger(__name__) ################################################################################ # HANDLER OBJ # ################################################################################ class",
"artifact uuid associated with the artifact (default []) uri_list (list",
"(list of dict): The list of the uri associated with",
"------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES # ################################################################################ import hashlib",
"type: dict The dictionary pertaining all the param is created",
"transaction onto the state storage. Args: transaction (Transaction): The transaction",
"pertaining the payload context (State): The current state of the",
"of the ledger, which includes the data from the transaction,",
"in order to improve runtime by eliminating the obvious exception",
"\"\"\" Logs the message to the debug logger. Args: msg",
"in msg) else: length = len(msg) msg = [msg] LOGGER.debug(\"+\"",
"payload to be stored in the state storage. Args: artifact_uuid",
"LIBRARIES & DEPENDENCIES # ################################################################################ import hashlib import logging import",
"cur, timestamp, artifact_list, uri_list) elif action == \"AddArtifact\" or action",
"= payload[\"action\"] prev = payload[\"prev_block\"] cur = payload[\"cur_block\"] timestamp =",
"return { \"uuid\" : artifact_id, \"alias\" : artifact_alias, \"name\" :",
"= payload[\"artifact_list\"] uri_list = payload[\"uri_list\"] except ValueError: raise InvalidTransaction(\"Invalid payload",
"\"\"\" Applys the payload from transaction onto the state storage.",
"# ################################################################################ class ArtifactTransactionHandler: \"\"\" Class for handling the Transaction",
"== \"AddURI\": if stored_artifact_id is None: raise InvalidTransaction( \"Invalid Action-requires",
"the param is created and returned to be stored on",
"artifact_openchain, prev, cur, timestamp) elif action == \"amend\" and stored_artifact_id",
"handling the Transaction Family : Artifact Attributes: namespace_prefix (str): The",
"= payload[\"content_type\"] artifact_checksum = payload[\"checksum\"] artifact_label = payload[\"label\"] artifact_openchain =",
"of the transaction timestamp (str): The UTC time for when",
"storage. Args: transaction (Transaction): The transaction pertaining the payload context",
"to improve runtime by eliminating the obvious exception errors. Args:",
"obvious exception errors. Args: artifact_id (str): The uuid of the",
"* \"-\" + \"+\") for line in msg: LOGGER.debug(\"+ \"",
"# Copyright 2016 Intel Corporation # Copyright 2017 Wind River",
"the state storage. Args: transaction (Transaction): The transaction pertaining the",
"in (\"AddArtifact\", \"create\", \"AddURI\", \"amend\"): raise InvalidTransaction(\"Invalid action: {}\".format(action)) def",
"\"License\"); # you may not use this file except in",
"stored in the state storage. Args: artifact_uuid (str): The uuid",
"scheme used for the data for the handler object. \"\"\"",
"artifact_checksum (str): The checksum of the artifact artifact_label (str): The",
"the family version of the handler object. \"\"\" return [\"1.0\"]",
"to be stored on the state storage. \"\"\" return {",
"def validate_transaction(artifact_id, action): \"\"\" Performs soft sanity check in order",
"The command to be performed Raises: InvalidTransaction: If the uuid",
"associated UUID if the artifact already exists in the state",
"+ (length + 2) * \"-\" + \"+\") ################################################################################ #",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"action == \"create\": artifact = create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type, artifact_checksum,",
"except ValueError: raise InvalidTransaction(\"Invalid payload serialization\") # Soft sanity check",
"to be stored on the state storage. Raises: InvalidTransaction: *",
"returned to be stored on the state storage. \"\"\" return",
"(default []) Returns: type: dict The dictionary pertaining all the",
"data from the transaction, is returned to be stored on",
"action) data_address = make_artifact_address(self._namespace_prefix, artifact_id) state_entries = context.get_state([data_address]) # Hard",
"# distributed under the License is distributed on an \"AS",
"the data for the handler object. \"\"\" return [\"csv-utf8\"] @property",
"= json.dumps(artifact).encode() addresses = context.set_state({data_address:data}) return addresses ################################################################################ # HELPER",
"the transaction timestamp (str): The UTC time for when the",
"improve runtime by eliminating the obvious exception errors. Args: artifact_id",
"if not artifact_id: raise InvalidTransaction(\"Artifact ID is required\") if not",
"None: raise InvalidTransaction( \"Invalid Action-requires an existing artifact.\" ) artifact",
"# Unless required by applicable law or agreed to in",
"artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list=[],",
"with the artifact (default []) Returns: type: dict The dictionary",
"stored_artifact = None if action == \"create\" and stored_artifact_id is",
"an artifact address which will be used to recover the",
"if the artifact already exists in the state storage; or,",
"DEPENDENCIES # ################################################################################ import hashlib import logging import json from",
"artifact_alias, artifact_name, artifact_type, artifact_checksum, artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list,",
"operation was called InternalError: * If deserialization of State.data failed",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"################################################################################ # HANDLER OBJ # ################################################################################ class ArtifactTransactionHandler: \"\"\" Class",
"transaction payload try: payload = json.loads(transaction.payload.decode()) artifact_id = payload[\"uuid\"] artifact_alias",
"from transaction onto the state storage. Args: transaction (Transaction): The",
"def family_versions(self): \"\"\" type: list of str Returns the family",
"\"create\" and stored_artifact_id is not None: raise InvalidTransaction(\"Invalid Action-artifact already",
"of the artifact artifact_alias (str): The alias of the artifact",
"timestamp (str): The UTC time for when the transaction was",
"the uuid or the action are not passed in or",
"the artifact Returns: type: str The address-to-be, which associates the",
"and loading required data validate_transaction(artifact_id, action) data_address = make_artifact_address(self._namespace_prefix, artifact_id)",
"artifact_label, artifact_openchain, prev, cur, timestamp, artifact_list, uri_list) # Adding the",
"uri associated with the artifact (default []) Returns: type: dict",
"You may obtain a copy of the License at #",
"the ledger Returns: type: State The new state of the",
"uuid associated with the artifact (default []) uri_list (list of",
"list of str Returns the namespaces associating with the handler",
"current block id of the transaction timestamp (str): The UTC",
"if action == \"create\" and stored_artifact_id is not None: raise",
"performed Raises: InvalidTransaction: If the uuid or the action are",
"\"\"\" if not artifact_id: raise InvalidTransaction(\"Artifact ID is required\") if",
"cur (str): the current block id of the transaction timestamp",
"The namepsace prefix of the transaction family \"\"\" self._namespace_prefix =",
"be performed Raises: InvalidTransaction: If the uuid or the action",
"from transaction failed * If \"create\" was called on non-unique",
"# ------------------------------------------------------------------------------ ################################################################################ # LIBRARIES & DEPENDENCIES # ################################################################################ import",
"\"timestamp\" : timestamp, \"artifact_list\" : artifact_list, \"uri_list\" : uri_list }",
": artifact_label, \"openchain\" : artifact_openchain, \"prev_block\" : prev, \"cur_block\" :",
"+ \" +\") LOGGER.debug(\"+\" + (length + 2) * \"-\"",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"sawtooth_sdk.processor.exceptions import InvalidTransaction from sawtooth_sdk.processor.exceptions import InternalError from sawtooth_sdk.processor.handler import",
"family \"\"\" def __init__(self, namespace_prefix): \"\"\" Constructs the ArtifactTransactionHandler object.",
"artifact_id: raise InvalidTransaction(\"Artifact ID is required\") if not action: raise",
"namespace_prefix (str): The namespace prefix of the transaction family \"\"\"",
"InvalidTransaction(\"Invalid Action-artifact already exists.\") elif action == \"create\": artifact =",
"hashlib.sha512(artifact_id.encode(\"utf-8\")).hexdigest()[:64] def _display(msg): \"\"\" Logs the message to the debug",
"state storage. Args: transaction (Transaction): The transaction pertaining the payload",
"message that is to be logged into the debug logger"
]
[
"with: scrapy runspider ReviewsCollector.py \"\"\" import scrapy import json class",
"self.data: if (item['url'] is not None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse)",
"json class ReviewsCollector(scrapy.Spider): def start_requests(self): with open(\"data/books.json\") as f: self.data",
"drive Start with: scrapy runspider ReviewsCollector.py \"\"\" import scrapy import",
"of urls in the json files and downloads the html",
"Start with: scrapy runspider ReviewsCollector.py \"\"\" import scrapy import json",
"self.data = json.load(f) for item in self.data: if (item['url'] is",
"runspider ReviewsCollector.py \"\"\" import scrapy import json class ReviewsCollector(scrapy.Spider): def",
"files to local drive Start with: scrapy runspider ReviewsCollector.py \"\"\"",
"f: self.data = json.load(f) for item in self.data: if (item['url']",
"callback=self.parse) def parse(self, response): filename = response.url.split(\"/\")[-1] + '.html' with",
"and downloads the html files to local drive Start with:",
"as f: self.data = json.load(f) for item in self.data: if",
"the list of urls in the json files and downloads",
"Takes the list of urls in the json files and",
"filename = response.url.split(\"/\")[-1] + '.html' with open('data/reviews/' + filename, 'wb+')",
"in the json files and downloads the html files to",
"= response.url.split(\"/\")[-1] + '.html' with open('data/reviews/' + filename, 'wb+') as",
"STEP 2 Takes the list of urls in the json",
"def start_requests(self): with open(\"data/books.json\") as f: self.data = json.load(f) for",
"html files to local drive Start with: scrapy runspider ReviewsCollector.py",
"response): filename = response.url.split(\"/\")[-1] + '.html' with open('data/reviews/' + filename,",
"to local drive Start with: scrapy runspider ReviewsCollector.py \"\"\" import",
"python3 # -*- coding: utf-8 -*- \"\"\" STEP 2 Takes",
"-*- \"\"\" STEP 2 Takes the list of urls in",
"downloads the html files to local drive Start with: scrapy",
"item in self.data: if (item['url'] is not None): yield scrapy.Request(url=item['url'],",
"import json class ReviewsCollector(scrapy.Spider): def start_requests(self): with open(\"data/books.json\") as f:",
"ReviewsCollector.py \"\"\" import scrapy import json class ReviewsCollector(scrapy.Spider): def start_requests(self):",
"local drive Start with: scrapy runspider ReviewsCollector.py \"\"\" import scrapy",
"(item['url'] is not None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self,",
"is not None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self, response):",
"parse(self, response): filename = response.url.split(\"/\")[-1] + '.html' with open('data/reviews/' +",
"response.url.split(\"/\")[-1] + '.html' with open('data/reviews/' + filename, 'wb+') as f:",
"-*- coding: utf-8 -*- \"\"\" STEP 2 Takes the list",
"scrapy runspider ReviewsCollector.py \"\"\" import scrapy import json class ReviewsCollector(scrapy.Spider):",
"json files and downloads the html files to local drive",
"for item in self.data: if (item['url'] is not None): yield",
"# -*- coding: utf-8 -*- \"\"\" STEP 2 Takes the",
"yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self, response): filename = response.url.split(\"/\")[-1]",
"def parse(self, response): filename = response.url.split(\"/\")[-1] + '.html' with open('data/reviews/'",
"the json files and downloads the html files to local",
"class ReviewsCollector(scrapy.Spider): def start_requests(self): with open(\"data/books.json\") as f: self.data =",
"json.load(f) for item in self.data: if (item['url'] is not None):",
"urls in the json files and downloads the html files",
"if (item['url'] is not None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def",
"list of urls in the json files and downloads the",
"\"\"\" STEP 2 Takes the list of urls in the",
"+ '.html' with open('data/reviews/' + filename, 'wb+') as f: f.write(response.body)",
"scrapy import json class ReviewsCollector(scrapy.Spider): def start_requests(self): with open(\"data/books.json\") as",
"\"\"\" import scrapy import json class ReviewsCollector(scrapy.Spider): def start_requests(self): with",
"with open(\"data/books.json\") as f: self.data = json.load(f) for item in",
"scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self, response): filename = response.url.split(\"/\")[-1] +",
"coding: utf-8 -*- \"\"\" STEP 2 Takes the list of",
"files and downloads the html files to local drive Start",
"open(\"data/books.json\") as f: self.data = json.load(f) for item in self.data:",
"2 Takes the list of urls in the json files",
"the html files to local drive Start with: scrapy runspider",
"utf-8 -*- \"\"\" STEP 2 Takes the list of urls",
"not None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self, response): filename",
"import scrapy import json class ReviewsCollector(scrapy.Spider): def start_requests(self): with open(\"data/books.json\")",
"in self.data: if (item['url'] is not None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'},",
"None): yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self, response): filename =",
"start_requests(self): with open(\"data/books.json\") as f: self.data = json.load(f) for item",
"#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" STEP 2",
"headers={'Referer':'http://www.google.com/'}, callback=self.parse) def parse(self, response): filename = response.url.split(\"/\")[-1] + '.html'",
"ReviewsCollector(scrapy.Spider): def start_requests(self): with open(\"data/books.json\") as f: self.data = json.load(f)",
"= json.load(f) for item in self.data: if (item['url'] is not"
]
[
"of this light.\"\"\" raise NotImplementedError class LightDevice(ABC): @classmethod def __subclasshook__(cls,",
"\"\"\"Set the color of all the lights in the LightSystem.\"\"\"",
"color: Color): \"\"\"Set the color of all the lights in",
"and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self):",
"this group.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set",
"color: Color): \"\"\"Set the color of this light.\"\"\" raise NotImplementedError",
"from abc import ABC, abstractmethod from .color import Color class",
"'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): \"\"\"Discover the lights and",
"'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def",
"and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights')",
"set_color(self, color: Color): \"\"\"Set the color of all the lights",
"the lights in this group.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self,",
"import Color class LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass,",
"class LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and",
"to transition.\"\"\" raise NotImplementedError @abstractmethod def set_color(self, color: Color): \"\"\"Set",
"hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): \"\"\"Discover the lights",
"from .color import Color class LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass):",
"and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time')",
"the color of this light.\"\"\" raise NotImplementedError class LightDevice(ABC): @classmethod",
"def turn_off(self): \"\"\"Turn off the lights in this group.\"\"\" raise",
"subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and",
"@abstractmethod def turn_on(self): \"\"\"Turn on this light.\"\"\" raise NotImplementedError @abstractmethod",
"ABC, abstractmethod from .color import Color class LightSystem(ABC): @classmethod def",
"on the lights in this group.\"\"\" raise NotImplementedError @abstractmethod def",
"@abstractmethod def turn_off(self): \"\"\"Turn off the lights in this group.\"\"\"",
"the color of all the lights in the LightSystem.\"\"\" raise",
"return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off)",
"this light.\"\"\" raise NotImplementedError class LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass):",
"abstractmethod from .color import Color class LightSystem(ABC): @classmethod def __subclasshook__(cls,",
"@abstractmethod def turn_off(self): \"\"\"Turn off the light.\"\"\" raise NotImplementedError @abstractmethod",
"def set_color(self, color: Color): \"\"\"Set the color of all the",
"def __subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass,",
"turn_off(self): \"\"\"Turn off the lights in this group.\"\"\" raise NotImplementedError",
"color of all the lights in the LightSystem.\"\"\" raise NotImplementedError",
"long it takes in milliseconds for colors to transition.\"\"\" raise",
"callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on this light.\"\"\" raise NotImplementedError",
"discover_lights(self): \"\"\"Discover the lights and groups in this LightSystem.\"\"\" raise",
"\"\"\"Turn on this light.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn",
"and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on the lights in",
"'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass,",
"raise NotImplementedError class LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass,",
"all the lights in the LightSystem.\"\"\" raise NotImplementedError class LightGroup(ABC):",
"callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and",
"hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on the",
"@classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and",
"callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and",
"transition.\"\"\" raise NotImplementedError @abstractmethod def set_color(self, color: Color): \"\"\"Set the",
"turn_on(self): \"\"\"Turn on this light.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self):",
"milliseconds for colors to transition.\"\"\" raise NotImplementedError @abstractmethod def set_color(self,",
"light.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the light.\"\"\"",
"LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time)",
"@abstractmethod def set_color(self, color: Color): \"\"\"Set the color of all",
"hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on this",
"lights and groups in this LightSystem.\"\"\" raise NotImplementedError @abstractmethod def",
"and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color))",
"turn_on(self): \"\"\"Turn on the lights in this group.\"\"\" raise NotImplementedError",
"Color): \"\"\"Set the color of all the lights in the",
"and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time)",
"takes in milliseconds for colors to transition.\"\"\" raise NotImplementedError @abstractmethod",
"light.\"\"\" raise NotImplementedError class LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass): return",
"<gh_stars>10-100 from abc import ABC, abstractmethod from .color import Color",
"group.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the lights",
"class LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and",
"raise NotImplementedError @abstractmethod def set_color(self, color: Color): \"\"\"Set the color",
"Color): \"\"\"Set the color of this light.\"\"\" raise NotImplementedError class",
"int): \"\"\"Set how long it takes in milliseconds for colors",
"\"\"\"Turn on the lights in this group.\"\"\" raise NotImplementedError @abstractmethod",
"raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set how long",
"callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn",
"and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self):",
"lights in the LightSystem.\"\"\" raise NotImplementedError class LightGroup(ABC): @classmethod def",
"this group.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the",
"def set_color(self, color: Color): \"\"\"Set the color of this light.\"\"\"",
"NotImplementedError class LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on')",
"callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and",
"set_transition_time(self, transition_time: int): \"\"\"Set how long it takes in milliseconds",
"for colors to transition.\"\"\" raise NotImplementedError @abstractmethod def set_color(self, color:",
"light.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set how",
"(hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and",
"it takes in milliseconds for colors to transition.\"\"\" raise NotImplementedError",
"\"\"\"Set the color of this light.\"\"\" raise NotImplementedError class LightDevice(ABC):",
"this light.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the",
"LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)",
"raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the lights in",
"groups in this LightSystem.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time:",
"@abstractmethod def discover_lights(self): \"\"\"Discover the lights and groups in this",
"'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass,",
"@abstractmethod def turn_on(self): \"\"\"Turn on the lights in this group.\"\"\"",
"LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)",
"__subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass, 'turn_off')",
"callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): \"\"\"Discover",
"class LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time') and",
"of all the lights in the LightSystem.\"\"\" raise NotImplementedError class",
"@abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set how long it takes",
"group.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set how",
"and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color')",
"'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def",
"'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass,",
"transition_time: int): \"\"\"Set how long it takes in milliseconds for",
"callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): \"\"\"Discover the lights and groups in",
"the light.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set",
"in this LightSystem.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int):",
"turn_off(self): \"\"\"Turn off the light.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self,",
"how long it takes in milliseconds for colors to transition.\"\"\"",
"the LightSystem.\"\"\" raise NotImplementedError class LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass):",
"'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on the lights",
"on this light.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off",
"the lights in this group.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self):",
"def turn_off(self): \"\"\"Turn off the light.\"\"\" raise NotImplementedError @abstractmethod def",
"lights in this group.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time:",
"@classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and",
"def set_transition_time(self, transition_time: int): \"\"\"Set how long it takes in",
"def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on) and hasattr(subclass,",
"the lights in the LightSystem.\"\"\" raise NotImplementedError class LightGroup(ABC): @classmethod",
"raise NotImplementedError class LightDevice(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass,",
"in this group.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int):",
"__subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights')",
"'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on this light.\"\"\"",
"\"\"\"Set how long it takes in milliseconds for colors to",
"subclass): return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and",
"def discover_lights(self): \"\"\"Discover the lights and groups in this LightSystem.\"\"\"",
"in milliseconds for colors to transition.\"\"\" raise NotImplementedError @abstractmethod def",
"and groups in this LightSystem.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self,",
"and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): \"\"\"Discover the",
"the lights and groups in this LightSystem.\"\"\" raise NotImplementedError @abstractmethod",
"NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set how long it",
"this LightSystem.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set",
"off the light.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int):",
"and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on this light.\"\"\" raise",
"return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights)",
"(hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and",
".color import Color class LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass): return",
"NotImplementedError class LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'turn_on')",
"LightSystem.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time: int): \"\"\"Set how",
"colors to transition.\"\"\" raise NotImplementedError @abstractmethod def set_color(self, color: Color):",
"off the lights in this group.\"\"\" raise NotImplementedError @abstractmethod def",
"and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights))",
"hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod",
"@abstractmethod def set_color(self, color: Color): \"\"\"Set the color of this",
"NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the lights in this",
"\"\"\"Turn off the light.\"\"\" raise NotImplementedError @abstractmethod def set_transition_time(self, transition_time:",
"raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the light.\"\"\" raise",
"NotImplementedError @abstractmethod def set_color(self, color: Color): \"\"\"Set the color of",
"Color class LightSystem(ABC): @classmethod def __subclasshook__(cls, subclass): return (hasattr(subclass, 'set_transition_time')",
"NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off the light.\"\"\" raise NotImplementedError",
"in this group.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn off",
"\"\"\"Turn off the lights in this group.\"\"\" raise NotImplementedError @abstractmethod",
"callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on the lights in this",
"set_color(self, color: Color): \"\"\"Set the color of this light.\"\"\" raise",
"and callable(subclass.set_color_all_lights)) @abstractmethod def discover_lights(self): \"\"\"Discover the lights and groups",
"hasattr(subclass, 'turn_off') and callable(subclass.turn_off) and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time) and",
"lights in this group.\"\"\" raise NotImplementedError @abstractmethod def turn_off(self): \"\"\"Turn",
"in the LightSystem.\"\"\" raise NotImplementedError class LightGroup(ABC): @classmethod def __subclasshook__(cls,",
"color of this light.\"\"\" raise NotImplementedError class LightDevice(ABC): @classmethod def",
"abc import ABC, abstractmethod from .color import Color class LightSystem(ABC):",
"hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights) and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights)) @abstractmethod",
"def turn_on(self): \"\"\"Turn on the lights in this group.\"\"\" raise",
"LightSystem.\"\"\" raise NotImplementedError class LightGroup(ABC): @classmethod def __subclasshook__(cls, subclass): return",
"and hasattr(subclass, 'set_color') and callable(subclass.set_color)) @abstractmethod def turn_on(self): \"\"\"Turn on",
"def turn_on(self): \"\"\"Turn on this light.\"\"\" raise NotImplementedError @abstractmethod def",
"\"\"\"Discover the lights and groups in this LightSystem.\"\"\" raise NotImplementedError",
"import ABC, abstractmethod from .color import Color class LightSystem(ABC): @classmethod"
] |
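The shingles in the record above overlap enough to recover the shape of their source: an abstract light-control interface built on abc. The following is a best-effort reassembly sketch, not verbatim source; the Color stub, the method ordering, and the name set_color_all_lights (inferred from the __subclasshook__ check) are assumptions, and the LightGroup class evidenced in the fragments is only summarised in a comment.

# Best-effort sketch of the abstract light-interface module suggested by the
# fragments above. `Color` is stubbed so the sketch runs standalone; the
# original imports it via `from .color import Color`.
from abc import ABC, abstractmethod


class Color:  # placeholder for the original package's Color type
    pass


class LightSystem(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time)
                and hasattr(subclass, 'discover_lights') and callable(subclass.discover_lights)
                and hasattr(subclass, 'set_color_all_lights') and callable(subclass.set_color_all_lights))

    @abstractmethod
    def discover_lights(self):
        """Discover the lights and groups in this LightSystem."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color_all_lights(self, color: Color):  # name assumed from the hook above
        """Set the color of all the lights in the LightSystem."""
        raise NotImplementedError


class LightDevice(ABC):
    # The fragments also show a LightGroup(ABC) with the same four-method
    # pattern, documented as acting on "the lights in this group".
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)
                and hasattr(subclass, 'turn_off') and callable(subclass.turn_off)
                and hasattr(subclass, 'set_transition_time') and callable(subclass.set_transition_time)
                and hasattr(subclass, 'set_color') and callable(subclass.set_color))

    @abstractmethod
    def turn_on(self):
        """Turn on this light."""
        raise NotImplementedError

    @abstractmethod
    def turn_off(self):
        """Turn off the light."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of this light."""
        raise NotImplementedError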
[
"self.EYES def Apply_eyes(self): self.eyes = self.EYES Daughter = Family('Ashley', 'Smith',",
"Kids(Family): pass #Eyes are marked as Grey because they are",
"self.eyes = self.EYES Daughter = Family('Ashley', 'Smith', 'Brown') Son =",
"now # hair colors are brown because brown is the",
"first self.last = last self.hair = hair def fullname(self): return",
"= last self.hair = hair def fullname(self): return '{} {}'.format(self.first,self.last)",
"hair): self.first = first self.last = last self.hair = hair",
"temp = random.choice([1,2]) #using the punnet square in genetics we",
"or blue if (temp == 1): self.EYES = (\"Brown\") else:",
"#that the childs eyes will be brown or blue if",
"Grey because they are unknown for now # hair colors",
"color hair and eyes # they may randomly get through",
"hair def fullname(self): return '{} {}'.format(self.first,self.last) def eyefind(self): temp =",
"= Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes) #with the kids being",
"donor #with blue eyes and one with brown makes it",
"brown makes it 50/50 odds #that the childs eyes will",
"hair and eyes # they may randomly get through inheritance",
"1): self.EYES = (\"Brown\") else: self.EYES = (\"Blue\") return self.EYES",
"colors are brown because brown is the dominant hair color",
"'Brown' ) Son = Kids('Kevin','Smith','Brown') print(Daughter.eyes) print(Son.eyes) Daughter.Apply_eyes() Son.Apply_eyes() print(Daughter.eyes)",
"= Kids('Danielle', 'Smith', 'Brown' ) Son = Kids('Kevin','Smith','Brown') print(Daughter.eyes) print(Son.eyes)",
"= first self.last = last self.hair = hair def fullname(self):",
"def fullname(self): return '{} {}'.format(self.first,self.last) def eyefind(self): temp = random.choice([1,2])",
"odds #that the childs eyes will be brown or blue",
"Daughter = Kids('Danielle', 'Smith', 'Brown' ) Son = Kids('Kevin','Smith','Brown') print(Daughter.eyes)",
"will define what color hair and eyes # they may",
"self.EYES Daughter = Family('Ashley', 'Smith', 'Brown') Son = Family('Kevin', 'Smith',",
"and eyes # they may randomly get through inheritance class",
"we know thatt a donor #with blue eyes and one",
"#with blue eyes and one with brown makes it 50/50",
"# hair colors are brown because brown is the dominant",
"self.first = first self.last = last self.hair = hair def",
"def Apply_eyes(self): self.eyes = self.EYES Daughter = Family('Ashley', 'Smith', 'Brown')",
"brown because brown is the dominant hair color Daughter =",
"be brown or blue if (temp == 1): self.EYES =",
"one with brown makes it 50/50 odds #that the childs",
"the childs eyes will be brown or blue if (temp",
"makes it 50/50 odds #that the childs eyes will be",
"'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes) #with the kids being born it",
"self.EYES = (\"Brown\") else: self.EYES = (\"Blue\") return self.EYES def",
"= Family('Ashley', 'Smith', 'Brown') Son = Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes)",
"print(Son.eyes) #with the kids being born it will define what",
"fullname(self): return '{} {}'.format(self.first,self.last) def eyefind(self): temp = random.choice([1,2]) #using",
"<reponame>cadeng23/oop-cjgustafson import random class Family: def __init__(self,first, last, hair): self.first",
"= self.EYES Daughter = Family('Ashley', 'Smith', 'Brown') Son = Family('Kevin',",
"{}'.format(self.first,self.last) def eyefind(self): temp = random.choice([1,2]) #using the punnet square",
"the kids being born it will define what color hair",
"childs eyes will be brown or blue if (temp ==",
"born it will define what color hair and eyes #",
"being born it will define what color hair and eyes",
"color Daughter = Kids('Danielle', 'Smith', 'Brown' ) Son = Kids('Kevin','Smith','Brown')",
"last self.hair = hair def fullname(self): return '{} {}'.format(self.first,self.last) def",
"Son = Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes) #with the kids",
"what color hair and eyes # they may randomly get",
"randomly get through inheritance class Kids(Family): pass #Eyes are marked",
"def eyefind(self): temp = random.choice([1,2]) #using the punnet square in",
"def __init__(self,first, last, hair): self.first = first self.last = last",
"= hair def fullname(self): return '{} {}'.format(self.first,self.last) def eyefind(self): temp",
") Son = Kids('Kevin','Smith','Brown') print(Daughter.eyes) print(Son.eyes) Daughter.Apply_eyes() Son.Apply_eyes() print(Daughter.eyes) print(Son.eyes)",
"dominant hair color Daughter = Kids('Danielle', 'Smith', 'Brown' ) Son",
"last, hair): self.first = first self.last = last self.hair =",
"#Eyes are marked as Grey because they are unknown for",
"eyes will be brown or blue if (temp == 1):",
"as Grey because they are unknown for now # hair",
"return '{} {}'.format(self.first,self.last) def eyefind(self): temp = random.choice([1,2]) #using the",
"= (\"Brown\") else: self.EYES = (\"Blue\") return self.EYES def Apply_eyes(self):",
"are marked as Grey because they are unknown for now",
"because brown is the dominant hair color Daughter = Kids('Danielle',",
"Family('Ashley', 'Smith', 'Brown') Son = Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes)",
"= random.choice([1,2]) #using the punnet square in genetics we know",
"== 1): self.EYES = (\"Brown\") else: self.EYES = (\"Blue\") return",
"print(Daughter.eyes) print(Son.eyes) #with the kids being born it will define",
"Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes) #with the kids being born",
"#with the kids being born it will define what color",
"are unknown for now # hair colors are brown because",
"will be brown or blue if (temp == 1): self.EYES",
"'Brown') Son = Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes) #with the",
"hair color Daughter = Kids('Danielle', 'Smith', 'Brown' ) Son =",
"the dominant hair color Daughter = Kids('Danielle', 'Smith', 'Brown' )",
"Daughter = Family('Ashley', 'Smith', 'Brown') Son = Family('Kevin', 'Smith', 'Brown')",
"random class Family: def __init__(self,first, last, hair): self.first = first",
"(\"Blue\") return self.EYES def Apply_eyes(self): self.eyes = self.EYES Daughter =",
"it will define what color hair and eyes # they",
"marked as Grey because they are unknown for now #",
"eyes # they may randomly get through inheritance class Kids(Family):",
"may randomly get through inheritance class Kids(Family): pass #Eyes are",
"hair colors are brown because brown is the dominant hair",
"'{} {}'.format(self.first,self.last) def eyefind(self): temp = random.choice([1,2]) #using the punnet",
"kids being born it will define what color hair and",
"(\"Brown\") else: self.EYES = (\"Blue\") return self.EYES def Apply_eyes(self): self.eyes",
"class Family: def __init__(self,first, last, hair): self.first = first self.last",
"class Kids(Family): pass #Eyes are marked as Grey because they",
"if (temp == 1): self.EYES = (\"Brown\") else: self.EYES =",
"are brown because brown is the dominant hair color Daughter",
"genetics we know thatt a donor #with blue eyes and",
"#using the punnet square in genetics we know thatt a",
"punnet square in genetics we know thatt a donor #with",
"return self.EYES def Apply_eyes(self): self.eyes = self.EYES Daughter = Family('Ashley',",
"self.hair = hair def fullname(self): return '{} {}'.format(self.first,self.last) def eyefind(self):",
"because they are unknown for now # hair colors are",
"__init__(self,first, last, hair): self.first = first self.last = last self.hair",
"in genetics we know thatt a donor #with blue eyes",
"Apply_eyes(self): self.eyes = self.EYES Daughter = Family('Ashley', 'Smith', 'Brown') Son",
"self.last = last self.hair = hair def fullname(self): return '{}",
"(temp == 1): self.EYES = (\"Brown\") else: self.EYES = (\"Blue\")",
"random.choice([1,2]) #using the punnet square in genetics we know thatt",
"get through inheritance class Kids(Family): pass #Eyes are marked as",
"pass #Eyes are marked as Grey because they are unknown",
"50/50 odds #that the childs eyes will be brown or",
"they are unknown for now # hair colors are brown",
"'Smith', 'Brown') Son = Family('Kevin', 'Smith', 'Brown') print(Daughter.eyes) print(Son.eyes) #with",
"# they may randomly get through inheritance class Kids(Family): pass",
"unknown for now # hair colors are brown because brown",
"blue if (temp == 1): self.EYES = (\"Brown\") else: self.EYES",
"thatt a donor #with blue eyes and one with brown",
"is the dominant hair color Daughter = Kids('Danielle', 'Smith', 'Brown'",
"inheritance class Kids(Family): pass #Eyes are marked as Grey because",
"Kids('Danielle', 'Smith', 'Brown' ) Son = Kids('Kevin','Smith','Brown') print(Daughter.eyes) print(Son.eyes) Daughter.Apply_eyes()",
"blue eyes and one with brown makes it 50/50 odds",
"square in genetics we know thatt a donor #with blue",
"a donor #with blue eyes and one with brown makes",
"brown or blue if (temp == 1): self.EYES = (\"Brown\")",
"self.EYES = (\"Blue\") return self.EYES def Apply_eyes(self): self.eyes = self.EYES",
"with brown makes it 50/50 odds #that the childs eyes",
"and one with brown makes it 50/50 odds #that the",
"eyefind(self): temp = random.choice([1,2]) #using the punnet square in genetics",
"define what color hair and eyes # they may randomly",
"eyes and one with brown makes it 50/50 odds #that",
"it 50/50 odds #that the childs eyes will be brown",
"'Smith', 'Brown' ) Son = Kids('Kevin','Smith','Brown') print(Daughter.eyes) print(Son.eyes) Daughter.Apply_eyes() Son.Apply_eyes()",
"know thatt a donor #with blue eyes and one with",
"Family: def __init__(self,first, last, hair): self.first = first self.last =",
"= (\"Blue\") return self.EYES def Apply_eyes(self): self.eyes = self.EYES Daughter",
"through inheritance class Kids(Family): pass #Eyes are marked as Grey",
"import random class Family: def __init__(self,first, last, hair): self.first =",
"they may randomly get through inheritance class Kids(Family): pass #Eyes",
"for now # hair colors are brown because brown is",
"the punnet square in genetics we know thatt a donor",
"'Brown') print(Daughter.eyes) print(Son.eyes) #with the kids being born it will",
"brown is the dominant hair color Daughter = Kids('Danielle', 'Smith',",
"else: self.EYES = (\"Blue\") return self.EYES def Apply_eyes(self): self.eyes ="
] |
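Each record in this dump is a list of overlapping word shingles over one source file (the record above covers a small Family/Kids inheritance demo). Purely as an illustration of how such fragments relate to the underlying text, here is a minimal greedy merger; the helper name merge_shingles and the overlap heuristic are mine, not part of the dataset, and exact reconstruction of a full file is not guaranteed (duplicate windows and ties can derail the greedy choice).

# Illustrative only: stitch overlapping word shingles back into readable text
# by repeatedly joining the pair with the longest word-level overlap.
def merge_shingles(shingles):
    """Greedily merge word shingles by their longest word-level overlap."""
    pieces = [s.split() for s in shingles]

    def overlap(a, b):
        # length of the longest suffix of a that equals a prefix of b
        for k in range(min(len(a), len(b)), 0, -1):
            if a[-k:] == b[:k]:
                return k
        return 0

    merged = pieces.pop(0)
    while pieces:
        best_i, best_k, append = 0, -1, True
        for i, p in enumerate(pieces):
            k = overlap(merged, p)
            if k > best_k:
                best_i, best_k, append = i, k, True
            k = overlap(p, merged)
            if k > best_k:
                best_i, best_k, append = i, k, False
        p = pieces.pop(best_i)
        merged = merged + p[best_k:] if append else p + merged[best_k:]
    return ' '.join(merged)


print(merge_shingles([
    "import random class Family:",
    "class Family: def __init__(self,first, last, hair):",
]))
# -> "import random class Family: def __init__(self,first, last, hair):"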
[
"JSON logging.getLogger(__name__).error( 'Unable to parse payload as JSON: %s', payload)",
"platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import",
"as JSON: %s', payload) return if not isinstance(data, dict) or",
"data.get('_type') != 'location': return parts = topic.split('/') kwargs = {",
"\"\"\" # Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data",
"ValueError: # If invalid JSON logging.getLogger(__name__).error( 'Unable to parse payload",
"return if not isinstance(data, dict) or data.get('_type') != 'location': return",
"parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']), } if 'acc' in",
"for the device tracker. For more details about this platform,",
"setup_scanner(hass, config, see): \"\"\" Set up a OwnTracksks tracker. \"\"\"",
"invalid JSON logging.getLogger(__name__).error( 'Unable to parse payload as JSON: %s',",
"qos): \"\"\" MQTT message received. \"\"\" # Docs on available",
"import json import logging import homeassistant.components.mqtt as mqtt DEPENDENCIES =",
"'owntracks/+/+' def setup_scanner(hass, config, see): \"\"\" Set up a OwnTracksks",
"'host_name': parts[1], 'gps': (data['lat'], data['lon']), } if 'acc' in data:",
"For more details about this platform, please refer to the",
"payload, qos): \"\"\" MQTT message received. \"\"\" # Docs on",
"kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'],",
"DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config, see):",
"= 'owntracks/+/+' def setup_scanner(hass, config, see): \"\"\" Set up a",
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OwnTracks platform for the device tracker. For more details",
"payload as JSON: %s', payload) return if not isinstance(data, dict)",
"def setup_scanner(hass, config, see): \"\"\" Set up a OwnTracksks tracker.",
"%s', payload) return if not isinstance(data, dict) or data.get('_type') !=",
"if 'batt' in data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC,",
"= topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1],",
"in data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)",
"'location': return parts = topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1],",
"JSON: %s', payload) return if not isinstance(data, dict) or data.get('_type')",
"message received. \"\"\" # Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation",
"json import logging import homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt']",
"\"\"\" def owntracks_location_update(topic, payload, qos): \"\"\" MQTT message received. \"\"\"",
"= json.loads(payload) except ValueError: # If invalid JSON logging.getLogger(__name__).error( 'Unable",
"the documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json import logging import",
"def owntracks_location_update(topic, payload, qos): \"\"\" MQTT message received. \"\"\" #",
"'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']), } if",
"details about this platform, please refer to the documentation at",
"data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1) return",
"homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def",
"documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json import logging import homeassistant.components.mqtt",
"not isinstance(data, dict) or data.get('_type') != 'location': return parts =",
"see): \"\"\" Set up a OwnTracksks tracker. \"\"\" def owntracks_location_update(topic,",
"# If invalid JSON logging.getLogger(__name__).error( 'Unable to parse payload as",
"MQTT message received. \"\"\" # Docs on available data: #",
"isinstance(data, dict) or data.get('_type') != 'location': return parts = topic.split('/')",
"https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json import logging import homeassistant.components.mqtt as mqtt",
"dict) or data.get('_type') != 'location': return parts = topic.split('/') kwargs",
"json.loads(payload) except ValueError: # If invalid JSON logging.getLogger(__name__).error( 'Unable to",
"except ValueError: # If invalid JSON logging.getLogger(__name__).error( 'Unable to parse",
"data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except ValueError: #",
"parts = topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name':",
"the device tracker. For more details about this platform, please",
"or data.get('_type') != 'location': return parts = topic.split('/') kwargs =",
"on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except",
"available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except ValueError:",
"!= 'location': return parts = topic.split('/') kwargs = { 'dev_id':",
"try: data = json.loads(payload) except ValueError: # If invalid JSON",
"\"\"\" Set up a OwnTracksks tracker. \"\"\" def owntracks_location_update(topic, payload,",
"topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps':",
"kwargs['gps_accuracy'] = data['acc'] if 'batt' in data: kwargs['battery'] = data['batt']",
"this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\"",
"up a OwnTracksks tracker. \"\"\" def owntracks_location_update(topic, payload, qos): \"\"\"",
"to parse payload as JSON: %s', payload) return if not",
"if 'acc' in data: kwargs['gps_accuracy'] = data['acc'] if 'batt' in",
"logging.getLogger(__name__).error( 'Unable to parse payload as JSON: %s', payload) return",
"{ 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']), }",
"\"\"\" homeassistant.components.device_tracker.owntracks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OwnTracks platform for the device tracker. For",
"parts[1], 'gps': (data['lat'], data['lon']), } if 'acc' in data: kwargs['gps_accuracy']",
"= ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config, see): \"\"\"",
"received. \"\"\" # Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try:",
"# http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except ValueError: # If",
"import logging import homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC",
"return parts = topic.split('/') kwargs = { 'dev_id': '{}_{}'.format(parts[1], parts[2]),",
"'batt' in data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update,",
"tracker. \"\"\" def owntracks_location_update(topic, payload, qos): \"\"\" MQTT message received.",
"parse payload as JSON: %s', payload) return if not isinstance(data,",
"refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json import",
"'gps': (data['lat'], data['lon']), } if 'acc' in data: kwargs['gps_accuracy'] =",
"to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json import logging",
"as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass,",
"data['lon']), } if 'acc' in data: kwargs['gps_accuracy'] = data['acc'] if",
"kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1) return True",
"data['acc'] if 'batt' in data: kwargs['battery'] = data['batt'] see(**kwargs) mqtt.subscribe(hass,",
"mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config,",
"OwnTracksks tracker. \"\"\" def owntracks_location_update(topic, payload, qos): \"\"\" MQTT message",
"['mqtt'] LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config, see): \"\"\" Set",
"more details about this platform, please refer to the documentation",
"in data: kwargs['gps_accuracy'] = data['acc'] if 'batt' in data: kwargs['battery']",
"'Unable to parse payload as JSON: %s', payload) return if",
"homeassistant.components.device_tracker.owntracks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ OwnTracks platform for the device tracker. For more",
"about this platform, please refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/",
"LOCATION_TOPIC = 'owntracks/+/+' def setup_scanner(hass, config, see): \"\"\" Set up",
"if not isinstance(data, dict) or data.get('_type') != 'location': return parts",
"= { 'dev_id': '{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']),",
"logging import homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC =",
"config, see): \"\"\" Set up a OwnTracksks tracker. \"\"\" def",
"a OwnTracksks tracker. \"\"\" def owntracks_location_update(topic, payload, qos): \"\"\" MQTT",
"(data['lat'], data['lon']), } if 'acc' in data: kwargs['gps_accuracy'] = data['acc']",
"Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload)",
"'{}_{}'.format(parts[1], parts[2]), 'host_name': parts[1], 'gps': (data['lat'], data['lon']), } if 'acc'",
"= data['acc'] if 'batt' in data: kwargs['battery'] = data['batt'] see(**kwargs)",
"# Docs on available data: # http://owntracks.org/booklet/tech/json/#_typelocation try: data =",
"OwnTracks platform for the device tracker. For more details about",
"owntracks_location_update(topic, payload, qos): \"\"\" MQTT message received. \"\"\" # Docs",
"payload) return if not isinstance(data, dict) or data.get('_type') != 'location':",
"please refer to the documentation at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json",
"device tracker. For more details about this platform, please refer",
"tracker. For more details about this platform, please refer to",
"'acc' in data: kwargs['gps_accuracy'] = data['acc'] if 'batt' in data:",
"\"\"\" MQTT message received. \"\"\" # Docs on available data:",
"data: kwargs['gps_accuracy'] = data['acc'] if 'batt' in data: kwargs['battery'] =",
"Set up a OwnTracksks tracker. \"\"\" def owntracks_location_update(topic, payload, qos):",
"} if 'acc' in data: kwargs['gps_accuracy'] = data['acc'] if 'batt'",
"\"\"\" import json import logging import homeassistant.components.mqtt as mqtt DEPENDENCIES",
"import homeassistant.components.mqtt as mqtt DEPENDENCIES = ['mqtt'] LOCATION_TOPIC = 'owntracks/+/+'",
"http://owntracks.org/booklet/tech/json/#_typelocation try: data = json.loads(payload) except ValueError: # If invalid",
"data = json.loads(payload) except ValueError: # If invalid JSON logging.getLogger(__name__).error(",
"platform for the device tracker. For more details about this",
"If invalid JSON logging.getLogger(__name__).error( 'Unable to parse payload as JSON:",
"at https://home-assistant.io/components/device_tracker.owntracks/ \"\"\" import json import logging import homeassistant.components.mqtt as"
] |
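The record above reassembles into an early Home Assistant OwnTracks device-tracker platform: it subscribes to the MQTT topic owntracks/+/+ and turns '_type': 'location' JSON payloads into see() calls. A standalone sketch of just the payload handling follows; the function name parse_owntracks_message and returning the kwargs instead of invoking Home Assistant's see() callback are my adaptations so the snippet runs without the homeassistant package.

import json
import logging

# Sketch of the payload handling used by the OwnTracks tracker fragments
# above: parse an MQTT message on 'owntracks/+/+' and extract the
# device-tracker fields. The original passes these kwargs to see();
# here they are simply returned for illustration.
def parse_owntracks_message(topic, payload):
    try:
        data = json.loads(payload)
    except ValueError:
        # If invalid JSON
        logging.getLogger(__name__).error(
            'Unable to parse payload as JSON: %s', payload)
        return None
    if not isinstance(data, dict) or data.get('_type') != 'location':
        return None
    parts = topic.split('/')
    kwargs = {
        'dev_id': '{}_{}'.format(parts[1], parts[2]),
        'host_name': parts[1],
        'gps': (data['lat'], data['lon']),
    }
    if 'acc' in data:
        kwargs['gps_accuracy'] = data['acc']
    if 'batt' in data:
        kwargs['battery'] = data['batt']
    return kwargs


print(parse_owntracks_message(
    'owntracks/alice/phone',
    '{"_type": "location", "lat": 52.5, "lon": 13.4, "acc": 10, "batt": 87}'))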
[
"attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor",
"* (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :,",
"procedure is `generic across the batch`. The reason this is",
"-> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 =",
"-1. predicted_antecedents -= 1 output_dict = {\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices,",
"do # some comparisons based on span widths when we",
"retain with respect to the number of words in the",
"= 1.0, bce_pos_weight: float = None, local_window_size: int = 10,",
"torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:,",
"num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings",
"type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :,",
"Returns ------- The same output dictionary, but with an additional",
"our variables in terms of num_spans_to_keep, we need to #",
"# top_spans. The spans are in document order, so we",
"multiplier between zero and one which controls what percentage of",
"valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings",
"scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices, top_scores)",
"pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find",
"embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The offsets between each top",
"are none to select from. Similarly, each element can only",
"antecedent at the zeroth position, which represents the prediction that",
"# the number of spans we consider after the pruning",
"util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long()",
"EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor =",
") if self._endpoint_span_extractor is not None: span_embedding_size = self._attentive_span_extractor.get_output_dim() +",
"num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) #",
"coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood *",
"max_span_width self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1()",
"is a valid objective for # clustering as we don't",
"it's just a function of the span's position in #",
"mentions. # Shape: (batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices,",
"\"\"\" return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self,",
"-self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def forward(self,",
"clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each",
"span. # Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() if",
"2) spans = F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size, document_length,",
"+ event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep,",
"self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._antecedent_feedforward",
"attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings =",
"can use to make coreference decisions between valid span pairs.",
"pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels",
"of shape (batch_size, num_spans), representing the realis label of the",
"* encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape:",
"predicted_antecedents, \"coreference_scores\": coreference_scores, } if coref_labels is not None and",
"bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings",
"to consider with respect to the top k spans. Has",
"representing the unormalised score for each (span, antecedent) pair we",
"prune away spans that are unlikely to occur in a",
"and predicted antecedent indices into clusters of spans for each",
"self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) #",
"embedded representation of each span in the document. These span",
"``TextField`` we get as input to the model. context_layer :",
"The cluster id label for every antecedent span. The id",
"incorporates contextual information for each word in the document. mention_feedforward",
"num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep,",
"combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is not None: span_embedding_size =",
"num_spans_to_keep, max_antecedents + 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return",
"= antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)",
"number of considered spans (i.e not the word distance between",
"self._decoding = decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer",
"2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate = None # NIL",
"spans (i.e not the word distance between the spans). Has",
"Subtract one here because index 0 is the \"no antecedent\"",
"what percentage of candidate mention spans we retain with respect",
"is independent # of the batch dimension - it's just",
"log likelihood of all antecedents which are in the #",
"self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating to the antecedent spans.",
"attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask)",
"required. For each mention which survives the pruning stage, we",
"+ 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def",
"order, so we can just use the relative # index",
"predict previous spans, so this returns a matrix of shape",
"\"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] + coref_f1)",
"span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings]",
"+ event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings,",
"refine_gate) * event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device",
"specific span. realis_labels : ``torch.IntTensor``, optional (default = None). A",
"(target_labels >= 0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator if self._pretrain_ed:",
"type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:,",
"(default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer : ``RegularizerApplicator``,",
"event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels",
"# coreference cluster that would be valid antecedents. Our loss",
"links on bad spans, enabling the pruning strategy used in",
"For the dummy label, the score is always zero. For",
"as mentions. # Shape: (batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans,",
"spans, enabling the pruning strategy used in the forward pass.",
"* non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels * 0 else:",
"scores for every pair of spans. Additionally, a dummy label",
"batch. We use the \"original_text\" and \"clusters\" keys from this",
"clusters for that instance. Returns ------- An output dictionary consisting",
"zero. For the true antecedent spans, the score consists of",
"the batch, the list of clusters, which are in turn",
"0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x",
"on span widths when we attend over the # span",
"not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating",
"_generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not None: top_embeddings =",
"device = 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask",
"m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list)",
"we considered. \"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1)",
"batch`. The reason this is the case is that each",
"is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:]",
"# the multiple calls to util.batched_index_select below more efficient. flat_top_span_indices",
"+ gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight",
"``torch.FloatTensor`` Embedding representation of the pair of spans to consider.",
"# Shape: (1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding(",
"# print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels",
"= same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels *",
"2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices for",
"the mask to be -inf # in order to not",
": ``torch.IntTensor``, required. The cluster id label for every span.",
"src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\")",
"same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float()",
"candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores",
"torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: \"\"\" Computes scores for every",
"indices to take into account their # index into the",
"valid antecedents. For example, the first span in the document",
"= self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size + 2",
"(batch_size, document_length) text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask",
"from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules",
"possible distance buckets. self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)",
"data, in the sense that we are minimising, for a",
"coreference cluster. if self._pretrain_ed: # All antecedent mask is 0",
"about the clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``,",
"@staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``, required.",
"+ 2 * encoding_dim + feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings,",
"return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor):",
"scores for the span and its antecedent. The factoring allows",
"Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017. The basic",
": ``torch.IntTensor``, required. The cluster id label for every antecedent",
"# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings =",
"= [x[\"original_text\"] for x in metadata] output_dict[\"offset\"] = [x[\"token_offset\"] for",
"during training. \"\"\" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward:",
"A nested list, representing, for each instance in the batch,",
"(batch_size, num_spans_to_keep, max_antecedents, embedding_size). return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size,",
"index to the indices of its allowed antecedents. Note that",
"spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged",
"None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings =",
"self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is",
"allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from",
"self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))",
"decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer is not",
"# Prune based on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word *",
"top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float() return type_antecedent_labels def",
"= Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight",
"of the pairwise antecedent score and the unary mention scores",
"number of spans that were kept while pruning. max_antecedents :",
"\"labels\")) if context_layer is not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim",
"+ (1 - refine_gate) * event_rep return top_embeddings def _local_attention(self,",
"the annotated gold coreference clusters for that instance. Returns -------",
"event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape:",
"for each top span. Has shape (batch_size, num_spans_to_keep, event_type_size +",
"whether a given pair of spans belong to the same",
"antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: \"\"\" Computes scores for every pair",
": ``int``, required. The number of spans that were kept",
"self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size",
"src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import",
"distance buckets. self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight",
"------- An output dictionary consisting of: top_spans : ``torch.IntTensor`` A",
"span's position in # top_spans. The spans are in document",
"other spans are allowed antecedents. # Once we have this",
"belong to the same cluster in the gold clustering. Has",
"allowed antecedents. Note that this is independent # of the",
"index (with respect to top_spans) of the possible antecedents the",
"# Compute labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)",
"- valid_antecedent_offsets # In our matrix of indices, the upper",
"tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1), representing the",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings =",
"annotated gold coreference clusters for that instance. Returns ------- An",
"import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from allennlp.nn import",
"do is construct a matrix mapping a span's # index",
"likely antecedent. -1 means there was no predicted link. loss",
"(spans[:, :, 0] >= 0).squeeze(-1).float() # SpanFields return -1 when",
"``torch.IntTensor``, optional (default = None). A tensor of shape (batch_size,",
"device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices =",
"each span. This gives us variables with shapes # like",
"embedding size for all the embedded features, such as distances",
"need them to be <= 0. This is only relevant",
"antecedents, and we only consider up to max_antecedents # prior",
"(batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1)",
"= self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float())",
"\"\"\" # Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) #",
"local_window_size: int = 10, attention_type: str = 'dot', decoding: str",
"(batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape:",
"\"\"\" This ``Model`` implements the coreference resolution model described \"End-to-end",
"emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape =",
"text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings",
"int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask,",
"-1) else: raw_contextualized_embeddings = text_embeddings if self._attention_layer is not None:",
"top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations of the top spans.",
"= torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep,",
"for x in metadata] return output_dict @overrides def decode(self, output_dict:",
"spans we retain with respect to the number of words",
"antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)",
"attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size +",
"\"\"\" event_embeddings: ``torch.FloatTensor``, required. Embedding representations of the event types.",
"# Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()",
"-> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer",
"# Shape: (batch_size, num_spans, num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label)",
"into the batch. We precompute this here to make #",
"representing, for each instance in the batch, the list of",
"(1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents,",
"span the index (with respect to top_spans) of the possible",
"the # same gold cluster as the span we are",
"} if coref_labels is not None and event_type_labels is not",
"use the \"original_text\" and \"clusters\" keys from this dictionary, which",
"None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer =",
"This is only relevant in edge cases where # the",
"max_span_width: int = 1, spans_per_word: float = 0.1, max_antecedents: int",
"# Compute antecedent scores. # Shape: (batch_size, num_spans_to_keep, event_type_size +",
"self._pretrain_ed: pairwise_labels = pairwise_labels * 0 else: # for pairwise_labels",
"pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents +",
"in the batch, the list of clusters, which are in",
"several prior mentions k in the same # coreference cluster",
"Shape: (batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) #",
"span widths when we attend over the # span representations",
"``torch.FloatTensor``, required. Embedding representations of pairs of spans. Has shape",
"Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2,",
"# Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0] >=",
"attention_type: str = 'dot', decoding: str = 'type-guided', type_threshold: float",
"the negative marginal log-likelihood. # This is equal to the",
"want to predict, per span. # We're generating a logspace",
") else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2",
"coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision, \"c_r\": coref_recall,",
"num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log of the mask",
"into account their # index into the batch. We precompute",
"we get as input to the model. context_layer : ``Seq2SeqEncoder``",
"antecedents for each span. This gives us variables with shapes",
"shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask representing",
"only accepts 1D indices, but here # we need to",
"span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings if",
"to consider. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) \"\"\" #",
"we will eventually create a # distribution over these indices,",
"coref_f1 = self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision, \"c_r\": coref_recall, \"c_f1\": coref_f1,",
"bool = True, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] =",
"allennlp.modules.token_embedders import Embedding from allennlp.nn import util, InitializerApplicator, RegularizerApplicator from",
"some comparisons based on span widths when we attend over",
"(batch_size * num_spans_to_keep) # torch.index_select only accepts 1D indices, but",
"2), representing the inclusive start and end indices of candidate",
"The number of spans that were kept while pruning. max_antecedents",
"any antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``, required. The cluster",
"a predicted antecedent. This implies a clustering if we group",
"(1 - pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size +",
"torch.nn.functional as F from allennlp.data import Vocabulary from allennlp.models.model import",
"indices, the upper triangular part will be negative # because",
"``Model`` implements the coreference resolution model described \"End-to-end Neural Coreference",
"spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval)",
"embedding_size) \"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings,",
"= True, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None)",
"= self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision, \"c_r\": coref_recall, \"c_f1\": coref_f1, \"m_p\":",
"event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0),",
"top span. Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size).",
"max_span_width: ``int`` The maximum width of candidate spans. spans_per_word: float,",
"ids of each span, or -1 for those which do",
"= top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep) # torch.index_select only",
"-1 when they are used as padding. As we do",
"representing whether each antecedent span is valid. Required since different",
"max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep,",
"coreference cluster that would be valid antecedents. Our loss is",
"---------- num_spans_to_keep : ``int``, required. The number of spans that",
"distance between the span and each of its antecedents in",
"single antecedent j, but there might be several prior mentions",
"which are in turn comprised of a list of (start,",
"its allowed antecedents. Note that this is independent # of",
"top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores, } if coref_labels",
"allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import",
"``torch.IntTensor``, required. The cluster id label for every span. The",
"# Select tensors relating to the antecedent spans. # Shape:",
"of words in the document. max_antecedents: int, required. For each",
"context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information for each",
"(batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels =",
"the multiple calls to util.batched_index_select below more efficient. flat_top_span_indices =",
"self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width",
"else: # for pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True)",
"= 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight",
"antecedents. # Once we have this matrix, we reformat our",
"long as they are in # the same coreference cluster.",
"event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size,",
"@Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model`` implements the coreference resolution",
"a batch can be coreferent with any previous span, but",
"if self._local_window_size <= 0: self._attention_layer = None else: if self._attention_type",
"up with actual spans if the prediction # is greater",
"num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans,",
"DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding",
"num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width > 1:",
"otherwise. Parameters ---------- num_spans_to_keep : ``int``, required. The number of",
"will eventually create a # distribution over these indices, so",
": ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text``",
"= type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item())",
"self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label) # Shape: (batch_size, num_spans,",
"event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self,",
"torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate =",
"j <= i, or zero otherwise. Parameters ---------- num_spans_to_keep :",
"always zero. For the true antecedent spans, the score consists",
"pairwise scoring function to consider. This includes both the original",
"but with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]``",
"to get an embedded representation of each span in the",
"endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings",
"the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided,",
"True, type_match_in_eval: bool = True, initializer: InitializerApplicator = InitializerApplicator(), regularizer:",
"event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0,",
"util.get_device_of(text_mask)) if self._type_refine_gate is not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)",
"num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based on",
"represents the prediction that a span does not have any",
"the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information",
"text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels:",
"def forward(self, # type: ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor,",
"when they are used as padding. As we do #",
"spans to know which other spans are allowed antecedents. #",
"with anything. For the dummy label, the score is always",
"1]).item()) # print(pairwise_labels) # # # Shape: (batch_size, num_spans_to_keep, 1)",
"that would be valid antecedents. Our loss is the sum",
"document. mention_feedforward : ``FeedForward`` This feedforward network is applied to",
"gold cluster as the span we are currently considering. Each",
"num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask))",
"the \"original_text\" and \"clusters\" keys from this dictionary, which respectively",
"this many antecedents. lexical_dropout: ``int`` The probability of dropping out",
"(mention_result['f1-score'] + coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings:",
"every antecedent to consider with respect to the top k",
"# compare span pairs to decide each span's antecedent. Each",
"= util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices for antecedent spans",
"combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)",
"0 is the \"no antecedent\" class, # so this makes",
"the text of the document. spans : ``torch.IntTensor``, required. A",
"print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator",
"have any antecedents, because there are none to select from.",
":, None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings",
"augmented with a dummy antecedent at the zeroth position, which",
"0] >= 0).squeeze(-1).float() # SpanFields return -1 when they are",
"described \"End-to-end Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al.,",
"indices line up with actual spans if the prediction #",
"the sum of the # probability assigned to all valid",
"pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor,",
"+ max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return",
"None: # Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)",
"antecedents. For example, the first span in the document should",
"valid_antecedent_indices : ``torch.IntTensor`` The indices of every antecedent to consider",
"torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: \"\"\" Computes scores",
"# index of the spans to know which other spans",
"every antecedent span. The id is arbitrary, as we just",
"in the batch. We use the \"original_text\" and \"clusters\" keys",
"(1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents",
"# NIL for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map",
"survived the pruning stage. This procedure is `generic across the",
"just care about the clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels",
"2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\"",
"self._positive_label_size + 1]).item()) # print(pairwise_labels) # # # Shape: (batch_size,",
"top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices for antecedent",
"of shape ``(batch_size, num_spans_to_keep, 2)`` representing the start and end",
"top_spans. The spans are in document order, so we can",
"the pruning strategy used in the forward pass. Parameters ----------",
"span _cannot_ have any antecedents, because there are none to",
"decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list,",
"layer. antecedent_feedforward: ``FeedForward`` This feedforward network is applied to pairs",
"= torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else:",
"------- The same output dictionary, but with an additional ``clusters``",
"by a linear layer. feature_size: ``int`` The embedding size for",
"are scored and used to prune away spans that are",
"feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight",
"# top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() # Now",
"an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested",
"pair of spans belong to the same cluster. The labels",
"positive_label_size) \"\"\" event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices =",
"the top spans. Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings :",
"only relevant in edge cases where # the number of",
"in a coreference cluster. For the remaining spans, the model",
"+ event_prob[:, :, :1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings],",
"event_type_size, embedding_size) \"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return",
"if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid()",
"* 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1 # 10",
"``torch.IntTensor``, required. A tensor of shape (batch_size, num_spans, 2), representing",
"+ max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size,",
"event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor = None, metadata: List[Dict[str,",
"max_antecedents: int, required. For each mention which survives the pruning",
"# Shape: (batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]",
"is not None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size",
"of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of",
"score consists of the pairwise antecedent score and the unary",
"raw_contextualized_embeddings = text_embeddings if self._attention_layer is not None: new_contextualized_embeddings =",
"= self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask =",
"and its antecedent. The factoring allows the model to blame",
"to embed the ``text`` ``TextField`` we get as input to",
"valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not",
"spans belong to the same cluster. The labels are augmented",
"not coreferent with anything. For the dummy label, the score",
"torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans,",
"A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing the start",
"(top_event_type_labels == event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): #",
"k spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The",
"= torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep, 1 +",
"= bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width self._spans_per_word =",
"an embedded representation of each span in the document. These",
"num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices, top_scores) =",
"top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:] = 0 coreference_log_probs =",
"node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool",
"span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings) # Shape:",
"Used to embed the ``text`` ``TextField`` we get as input",
"in the sense that we are minimising, for a #",
"additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list,",
"clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents + 1). \"\"\" #",
"respect to antecedent_indices) of the most likely antecedent. -1 means",
"if self._pretrain_ed: pairwise_labels = pairwise_labels * 0 else: # for",
"the case is that each span in a batch can",
"target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator =",
"lambda x: x initializer(self) def _get_event_embedding(self, span_mask): \"\"\" :param span_mask:",
"each span in a batch can be coreferent with any",
"This layer incorporates contextual information for each word in the",
"which survived the pruning stage, # a predicted antecedent. This",
"def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor)",
"Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels = (1 - pairwise_labels).prod(-1,",
"of clusters, which are in turn comprised of a list",
"the span and its antecedent. The factoring allows the model",
":return: (batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels",
"as antecedents, and we only consider up to max_antecedents #",
"Embedding representation of the pair of spans to consider. Has",
"= context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width,",
"type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\" Generates a binary indicator for",
"for every span. device: ``int``, required. The CUDA device to",
"feedforward network is applied to pairs of span representation, along",
"self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1 # 10 possible distance",
"self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata",
"torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\" Generates a binary indicator",
"relevant in edge cases where # the number of spans",
") self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer)",
"batch. # This reformats the indices to take into account",
"= Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size",
"text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor =",
"# pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model`` implements",
"return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param top_event_type_labels: (batch, top_span_size,",
"coreference clusters for that instance. Returns ------- An output dictionary",
"is predicted, so long as they are in # the",
"span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight # Now, compute the",
"1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores =",
"top_embeddings], -1)) top_embeddings = refine_gate * top_embeddings + (1 -",
"gives us variables with shapes # like (batch_size, num_spans_to_keep, max_antecedents,",
"return new_contextualized_embeddings @overrides def forward(self, # type: ignore text: Dict[str,",
"\"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores, } if coref_labels is",
"+= valid_antecedent_log_mask.long() # Compute labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents",
"(num_spans_to_keep, max_antecedents), # (1, max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices,",
"min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach()",
"= pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer =",
"Optional, Tuple import torch import torch.nn.functional as F from allennlp.data",
"torch import torch.nn.functional as F from allennlp.data import Vocabulary from",
"\"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] + coref_f1) / 2.}",
"document. max_antecedents: int, required. For each mention which survives the",
"a clustering of the spans in the document. Parameters ----------",
"start and end indices of candidate spans for mentions. Comes",
"Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents) coreference_scores =",
"to top_spans) of the possible antecedents the model considered. predicted_antecedents",
"the forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations",
"predicted, so long as they are in # the same",
"self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores =",
"output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): \"\"\" Converts the",
"These span representations are scored and used to prune away",
"self._endpoint_span_extractor is not None: # Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings",
"import torch.nn.functional as F from allennlp.data import Vocabulary from allennlp.models.model",
"(batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size)",
"= text_mask[:, :, None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size),",
"span in a batch can be coreferent with any previous",
"with the data, in the sense that we are minimising,",
"comprised of a list of (start, end) inclusive spans into",
"self._context_layer(text_embeddings, text_mask) if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention(",
"predicted antecedent. This implies a clustering if we group #",
"None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) #",
"target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels:",
"id label for every span. The id is arbitrary, as",
"can be coreferent with any previous span, but here we",
"= int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,",
"torch.nn.Sigmoid() ) else: self._type_refine_gate = None # NIL for Unified",
"document order, so we can just use the relative #",
"event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob = torch.softmax(event_prob, -1)",
"span. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention",
"1.0, bce_loss_weight: float = 1.0, bce_pos_weight: float = None, local_window_size:",
"of spans for each element in the batch. Parameters ----------",
"(batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings,",
"TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner",
"Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017. The basic outline",
"likelihood of all antecedents which are in the # same",
"self._lexical_dropout = lambda x: x initializer(self) def _get_event_embedding(self, span_mask): \"\"\"",
"terms of spans we are considering. Has shape (1, max_antecedents).",
"None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings =",
"spans. This label is one if and only if the",
"factoring allows the model to blame many of the absent",
"not None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size =",
"of antecedent spans to consider for every span. device: ``int``,",
"refer to each other in a chain. # Shape: (batch_size,",
"Average() self._coref_loss_metric = Average() self._coref_label_metric = Average() self._type_label_metric = Average()",
"max_antecedents)`` representing for each top span the index (with respect",
"type_threshold: float = -1., type_refine: bool = True, type_match_in_eval: bool",
"padding. As we do # some comparisons based on span",
"antecedent_labels += valid_antecedent_log_mask.long() # Compute labels. # Shape: (batch_size, num_spans_to_keep,",
"types. Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required.",
"the possible `indices` of these spans. So, regardless of the",
"= None, max_span_width: int = 1, spans_per_word: float = 0.1,",
"num_spans), representing the event label of the specific span. realis_labels",
"calling :func:`forward` on an instance or batch of instances. Returns",
"raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list +=",
"event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2))",
"type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer is not None: endpoint_span_extractor_dim =",
"similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is not None: span_embedding_size",
"antecedent scores. # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)",
"def decode(self, output_dict: Dict[str, torch.Tensor]): \"\"\" Converts the list of",
"after the pruning stage is >= the # total number",
"-= 1 output_dict = {\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents,",
"any clusters. event_type_labels : ``torch.IntTensor``, optional (default = None). A",
"\"\"\" Computes an embedding representation of pairs of spans for",
"num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if",
"each of its antecedents in terms of the number of",
"span is not coreferent with anything. For the dummy label,",
"-1) # event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label)",
"of calling :func:`forward` on an instance or batch of instances.",
"required. The cluster id label for every span. The id",
"is arbitrary, as we just care about the clustering. Has",
"antecedent is predicted, so long as they are in #",
"util.get_device_of(raw_contextualized_embeddings) if device < 0: device = 'cpu' attention_mask =",
"text and the annotated gold coreference clusters for that instance.",
"consists of the pairwise antecedent score and the unary mention",
"mask to be -inf # in order to not mess",
"logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\"",
"= False, coref_loss_weight: float = 1.0, bce_loss_weight: float = 1.0,",
"num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets)",
"-> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ \"\"\" Parameters ---------- text",
"pair of spans. Additionally, a dummy label is included, representing",
"(batch_size, num_spans_to_keep, max_antecedents + 1). \"\"\" # Shape: (batch_size, num_spans_to_keep,",
"optional (default=``None``) If provided, will be used to calculate the",
"spans in terms of spans we are considering. Has shape",
"= Average() self._realis_loss_metric = Average() self._coref_loss_metric = Average() self._coref_label_metric =",
"Each span can only # have prior spans as antecedents,",
"over these indices, so we need the 0 elements of",
"which antecedent is predicted, so long as they are in",
"than -1. predicted_antecedents -= 1 output_dict = {\"top_spans\": top_spans, \"antecedent_indices\":",
"of the antecedent spans we are considering for each top",
"and one which controls what percentage of candidate mention spans",
"TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate = None",
"these indices, so we need the 0 elements of the",
":return: (batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) +",
"= torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] *",
"required. Embedding representations of the top spans. Has shape (batch_size,",
"pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:",
"+ event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1),",
"the batch. We use the \"original_text\" and \"clusters\" keys from",
"the score is always zero. For the true antecedent spans,",
"Average() self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight))",
"required. Embedding representations of the event types. Has shape (batch_size,",
"the batch. We precompute this here to make # the",
"scalar loss to be optimised. \"\"\" # Shape: (batch_size, document_length,",
"TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref",
"make # the multiple calls to util.batched_index_select below more efficient.",
"each top span the index (with respect to top_spans) of",
"returns a matrix of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th",
"float = 1.0, bce_loss_weight: float = 1.0, bce_pos_weight: float =",
"from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder",
"torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask = attention_mask - torch.eye(text_mask.size(1), #",
"2)`` representing the start and end word indices of the",
"This label is one if and only if the pair",
"event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2),",
"without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels =",
"valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This is a",
"valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute labels. # Shape: (batch_size,",
"Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result of",
"embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores.",
"EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric = Average() self._coref_loss_metric = Average()",
"> 0).float()) * span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight #",
"survived the pruning stage. antecedent_indices : ``torch.IntTensor`` A tensor of",
"(i - 1) - j if j <= i, or",
"consider. This includes both the original span representations, the element-wise",
"index is equal to (i - 1) - j if",
"``int``, required. The number of spans that were kept while",
"cluster id label for every antecedent span. The id is",
"num_spans_to_keep, we need to # compare span pairs to decide",
"(batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary",
"use to make coreference decisions between valid span pairs. #",
"= BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else:",
"consider as mentions. # Shape: (batch_size, num_spans_to_keep, 2) top_spans =",
"the index (with respect to top_spans) of the possible antecedents",
"-1) # Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)",
"if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none')",
"actual spans if the prediction # is greater than -1.",
"[x[\"token_offset\"] for x in metadata] output_dict['doc_id'] = [x.get(\"doc_id\", None) for",
"the 0 elements of the mask to be -inf #",
"the same # coreference cluster that would be valid antecedents.",
"if metadata is not None: output_dict[\"document\"] = [x[\"original_text\"] for x",
"gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"]",
"then scored by a linear layer. antecedent_feedforward: ``FeedForward`` This feedforward",
"output_dict = {\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores,",
"= context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(),",
"representations of the top spans. Has shape (batch_size, num_spans_to_keep, embedding_size).",
"end) inclusive spans into the original document. \"\"\" return node_decode(output_dict,",
"``RegularizerApplicator``, optional (default=``None``) If provided, will be used to calculate",
"1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self,",
"spans. Additionally, a dummy label is included, representing the decision",
"True, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) ->",
"layer incorporates contextual information for each word in the document.",
"(batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices =",
"torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\" Computes an embedding representation of pairs",
"antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor):",
"1) - j if j <= i, or zero otherwise.",
": ``int``, required. The maximum number of antecedent spans to",
"on 2019-09-10 # Mostly by AllenNLP import logging import math",
"All antecedent mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :,",
"required. The number of spans that were kept while pruning.",
"decisions between valid span pairs. # Shapes: # (num_spans_to_keep, max_antecedents),",
"max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),",
"all the embedded features, such as distances or span widths.",
"new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2 * encoding_dim",
"mentions k in the same # coreference cluster that would",
"the distribution. # Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices",
"decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool = False)",
"absent links on bad spans, enabling the pruning strategy used",
"max_antecedents, embedding_size) \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings",
"representations which is then scored by a linear layer. antecedent_feedforward:",
"lexical_dropout: ``int`` The probability of dropping out dimensions of the",
"self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask #",
"reason this is the case is that each span in",
"torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size",
"and the annotated gold coreference clusters for that instance. Returns",
"device: ``int``, required. The CUDA device to use. Returns -------",
"self._type_refine_gate is not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select",
"max_antecedents : ``int``, required. The maximum number of antecedent spans",
"representing the decision that the span is not coreferent with",
"= IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim()",
"that we have our variables in terms of num_spans_to_keep, we",
"1) # dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True) # Shape:",
"top_embeddings, event_embeddings): # (batch, top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings,",
"indices, we # need them to be <= 0. This",
"``TextField`` representing the text of the document. spans : ``torch.IntTensor``,",
"gold coreference clusters for that instance. Returns ------- An output",
"new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)",
"indices of its allowed antecedents. Note that this is independent",
"a # single antecedent j, but there might be several",
"spans to consider as mentions. # Shape: (batch_size, num_spans_to_keep, 2)",
"valid antecedents. This is a valid objective for # clustering",
"import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__) #",
"self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer = None else: if",
"be > the target indices. We want to mask these,",
"type: ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor =",
"precompute this here to make # the multiple calls to",
"required. The offsets between each top span and its antecedent",
"import Average from overrides import overrides from torch.nn import BCEWithLogitsLoss",
"of candidate spans. spans_per_word: float, required. A multiplier between zero",
"spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores",
"Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\" This method generates possible antecedents per",
"embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations of the top",
"50, lexical_dropout: float = 0.2, pretrain_ed: bool = False, pretrain_coref:",
"Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) #",
"strategy used in the forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``,",
"Has shape (1, max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding",
"antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings:",
"import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor,",
"Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import",
"target indices. We want to mask these, # because these",
"coref_loss_weight: float = 1.0, bce_loss_weight: float = 1.0, bce_pos_weight: float",
"that would be consistent with the data, in the sense",
"pairs to decide each span's antecedent. Each span can only",
"span. metadata : ``List[Dict[str, Any]]``, optional (default = None). A",
"embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize",
"the start and end word indices of the top spans",
"we are considering for each top span. Has shape (batch_size,",
"% self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else:",
"includes both the original span representations, the element-wise similarity of",
"bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0),",
"A tensor of shape (batch_size, num_spans), representing the cluster ids",
"antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) # Shape: (1,",
"valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor of shape",
"self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] +",
"embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents)",
"spans are allowed antecedents. # Once we have this matrix,",
"\"\"\" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward:",
"import torch import torch.nn.functional as F from allennlp.data import Vocabulary",
"as padding. As we do # some comparisons based on",
"span representations which is then scored by a linear layer.",
"the span representations which is then scored by a linear",
"= antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape:",
"a dummy label is included, representing the decision that the",
"torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices)",
"number of spans we consider after the pruning stage is",
"# Compute final predictions for which spans to consider as",
"the clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required.",
"def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param top_event_type_labels: (batch, top_span_size, 1) :return:",
"For the true antecedent spans, the score consists of the",
"distance between the two spans. Parameters ---------- shape (batch_size, event_type_size,",
"``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward` on an",
"Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)",
"# we can use to make coreference decisions between valid",
"and an embedding representation of the distance between the two",
"get_metrics(self, reset: bool = False) -> Dict[str, float]: mention_result =",
"candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size +",
"Converts the list of spans and predicted antecedent indices into",
"\"\"\" Converts the list of spans and predicted antecedent indices",
"the cluster ids of each span, or -1 for those",
"applying transitivity, imply a clustering of the spans in the",
"attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is",
"of spans and predicted antecedent indices into clusters of spans",
"probability of dropping out dimensions of the embedded text. initializer",
"self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask )",
"Shape: (1, 1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)",
"target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets,",
"Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size =",
"to select spans for each element in the batch. #",
"binary tensor representing whether a given pair of spans belong",
"the data, in the sense that we are minimising, for",
"dimensions of the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)",
"from src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__) # pylint: disable=invalid-name",
"1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner =",
"that instance. Returns ------- An output dictionary consisting of: top_spans",
"= self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list",
"The offsets between each top span and its antecedent spans",
"new_attention_mask) return new_contextualized_embeddings @overrides def forward(self, # type: ignore text:",
"zeroth position, which represents the prediction that a span does",
"1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings =",
"An output dictionary consisting of: top_spans : ``torch.IntTensor`` A tensor",
"pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of pairs",
"(batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) # Subtract one here",
"applied to pairs of span representation, along with any pairwise",
"(batch_size, num_spans, 2), representing the inclusive start and end indices",
"= IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is not",
"torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings",
"if self._type_refine_gate is not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) #",
"each top span and its antecedent spans in terms of",
"metadata is not None: output_dict[\"document\"] = [x[\"original_text\"] for x in",
"top spans. Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``,",
"be several prior mentions k in the same # coreference",
"the span we are currently considering. Each span i predicts",
"1). \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) #",
"We use the \"original_text\" and \"clusters\" keys from this dictionary,",
"-1 for those which do not appear in any clusters.",
"construct a matrix mapping a span's # index to the",
"self._positive_label_size)) # Shape: (1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings =",
"antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores",
"# top_span_mention_scores = top_span_mention_scores.detach() # Now that we have our",
"batch dimension - it's just a function of the span's",
"care about the clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels :",
"from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from",
"num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) # Subtract one here because",
"top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() #",
"are considering for each top span. Has shape (batch_size, num_spans_to_keep,",
"between the two spans. Parameters ---------- shape (batch_size, event_type_size, embedding_size).",
"any) they are coreferent with. The resulting coreference links, after",
"the coreference resolution model described \"End-to-end Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>",
"use. Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The indices of every",
"the specific span. metadata : ``List[Dict[str, Any]]``, optional (default =",
"log-likelihood. # This is equal to the log of the",
"(batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)",
"BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from",
"= bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents + event_type_size, embedding_size)",
"the zeroth position, which represents the prediction that a span",
"of span representation, along with any pairwise features, which is",
"top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self._positive_label_size,",
"= self._event_realis_classifier.forward(span_embeddings) # Prune based on mention scores. num_spans_to_keep_according_doc_len =",
"# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label",
"span pairs to decide each span's antecedent. Each span can",
"import Any, Dict, List, Optional, Tuple import torch import torch.nn.functional",
"be <= 0. This is only relevant in edge cases",
"-1) # (batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,",
"representation of the pair of spans to consider. Has shape",
"The cluster id label for every span. The id is",
"the document. max_antecedents: int, required. For each mention which survives",
"Model from allennlp.modules import FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder,",
"max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for every span.",
"allennlp.nn import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average from",
"same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels * 0",
"<= 0. This is only relevant in edge cases where",
"event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self,",
"while pruning. max_antecedents : ``int``, required. The maximum number of",
"(batch_size, num_spans), representing the realis label of the specific span.",
"is not None: # Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings =",
"an instance or batch of instances. Returns ------- The same",
"self._attention_layer = None else: if self._attention_type == 'dot': similarity_function =",
"Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor",
"attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None]",
"have different numbers of valid antecedents. For example, the first",
"# type: ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor",
"were kept while pruning. max_antecedents : ``int``, required. The maximum",
"group # mentions which refer to each other in a",
"else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head =",
"allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import",
"= self._context_layer(text_embeddings, text_mask) if self._attention_layer is not None: new_contextualized_embeddings =",
"list of (start, end) inclusive spans into the original document.",
"away spans that are unlikely to occur in a coreference",
"candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) #",
"antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels:",
"so this makes the indices line up with actual spans",
"-> torch.FloatTensor: \"\"\" Computes scores for every pair of spans.",
"the same cluster. The labels are augmented with a dummy",
"valid span pairs. # Shapes: # (num_spans_to_keep, max_antecedents), # (1,",
"A metadata dictionary for each instance in the batch. We",
"the pruning stage is >= the # total number of",
"self._event_realis_classifier.forward(span_embeddings) # Prune based on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word",
"which survived the pruning stage. This procedure is `generic across",
"distribution over these indices, so we need the 0 elements",
"again to get embeddings # for all valid antecedents for",
"event_prob = torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings)",
"as we don't mind which antecedent is predicted, so long",
"End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model`` implements the coreference resolution model described",
"and each of its antecedents in terms of the number",
"TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity",
"self._context_layer: # Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)",
": ``FeedForward`` This feedforward network is applied to the span",
"= (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents",
"representations, the element-wise similarity of the span representations, and an",
"candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores. #",
"antecedent_indices) of the most likely antecedent. -1 means there was",
"> 0).float() type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item())",
"\"original_text\" and \"clusters\" keys from this dictionary, which respectively have",
"= F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings",
"torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None] * attention_mask",
"initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If",
"1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents +",
": ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we get",
"span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size",
"self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor",
"antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float() pairwise_labels = same_cluster_indicator *",
"with respect to the top k spans. Has shape ``(num_spans_to_keep,",
"of spans to consider. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size)",
"# pylint: disable=arguments-differ \"\"\" Parameters ---------- text : ``Dict[str, torch.LongTensor]``,",
"float = 1.0, bce_pos_weight: float = None, local_window_size: int =",
"al., 2017. The basic outline of this model is to",
"non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels * 0 else: #",
"dropping out dimensions of the embedded text. initializer : ``InitializerApplicator``,",
"of spans we are considering. Has shape (1, max_antecedents). Returns",
"def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size, emb_size) bmm event_prob",
"clusters. event_type_labels : ``torch.IntTensor``, optional (default = None). A tensor",
"top spans that survived the pruning stage. antecedent_indices : ``torch.IntTensor``",
"for which spans to consider as mentions. # Shape: (batch_size,",
"be valid antecedents. Our loss is the sum of the",
"generating a logspace mask here because we will eventually create",
"distribution. # Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >=",
"top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape",
"1, spans_per_word: float = 0.1, max_antecedents: int = 50, lexical_dropout:",
"-1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings",
"there was no predicted link. loss : ``torch.FloatTensor``, optional A",
"distances or span widths. max_span_width: ``int`` The maximum width of",
"self._pretrain_ed = pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer",
"\"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score']",
"into the original document. \"\"\" return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size,",
"\"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels)",
"label of the specific span. metadata : ``List[Dict[str, Any]]``, optional",
"This is equal to the log of the sum of",
"* span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight # Now, compute",
": ``torch.FloatTensor`` A binary tensor representing whether a given pair",
"of the pair of spans to consider. Has shape (batch_size,",
"the pruning stage. This procedure is `generic across the batch`.",
"network is applied to the span representations which is then",
": ``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward` on",
"self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] + coref_f1) / 2.} @staticmethod",
"a ``TextField`` representing the text of the document. spans :",
"offsets will be > the target indices. We want to",
"= self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating to the antecedent",
"possible we might # consider a masked span. # Shape:",
"List, Optional, Tuple import torch import torch.nn.functional as F from",
"# clustering as we don't mind which antecedent is predicted,",
"for each element in the batch. # This reformats the",
"antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``, required. The cluster id",
"``torch.IntTensor`` The indices of every antecedent to consider with respect",
"(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) \"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),",
":param top_event_type_labels: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\"",
"the normalisation of the distribution. # Shape: (1, num_spans_to_keep, max_antecedents)",
"= raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2 * encoding_dim +",
"in the forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding",
": ``torch.IntTensor``, required. The offsets between each top span and",
") else: self._type_refine_gate = None # NIL for Unified Event",
"where the (i,j)-th index is equal to (i - 1)",
"position, which represents the prediction that a span does not",
"``int``, required. The CUDA device to use. Returns ------- valid_antecedent_indices",
"of: top_spans : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep,",
"Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). return: (batch_size, num_spans_to_keep, max_antecedents",
"top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() # Now that",
"coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood",
"util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()",
"labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels =",
"method generates possible antecedents per span which survived the pruning",
"considered spans (i.e not the word distance between the spans).",
"respect to the number of words in the document. max_antecedents:",
"\"clusters\" keys from this dictionary, which respectively have the original",
"event types. Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``,",
"in metadata] return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]):",
"= torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings if self._attention_layer",
"representing whether a given pair of spans belong to the",
"'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask = attention_mask",
"(1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This",
"across the batch`. The reason this is the case is",
"of all antecedents which are in the # same gold",
"torch.FloatTensor) -> torch.FloatTensor: \"\"\" Computes scores for every pair of",
"reformat our variables again to get embeddings # for all",
"required. The cluster id label for every antecedent span. The",
"torch.IntTensor = None, event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor =",
"stage, we consider this many antecedents. lexical_dropout: ``float`` The probability",
"of the mask for valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor``",
"labels for the spans which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),",
"the offsets will be > the target indices. We want",
"is that each span in a batch can be coreferent",
"here we are computing the possible `indices` of these spans.",
"span, the index (with respect to antecedent_indices) of the most",
"pairs of spans for the pairwise scoring function to consider.",
"pylint: disable=arguments-differ \"\"\" Parameters ---------- text : ``Dict[str, torch.LongTensor]``, required.",
"we are currently considering. Each span i predicts a #",
"model is to get an embedded representation of each span",
"antecedent_offsets : ``torch.IntTensor``, required. The offsets between each top span",
"# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings =",
"shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob =",
"occur in a coreference cluster. For the remaining spans, the",
"= self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1)",
"device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\" This method generates",
"util.batched_index_select below more efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute",
"+ 1). \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels)",
"for a # given span, the negative marginal log likelihood",
"of the number of considered spans (i.e not the word",
"import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1",
"int, required. For each mention which survives the pruning stage,",
"= Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type",
"of the document. spans : ``torch.IntTensor``, required. A tensor of",
"log of the sum of the probabilities of all antecedent",
"predicted antecedent indices into clusters of spans for each element",
"_type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size, emb_size) bmm event_prob =",
"enabling the pruning strategy used in the forward pass. Parameters",
"Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape:",
"antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size,",
"label of the specific span. realis_labels : ``torch.IntTensor``, optional (default",
"representing, for each top span, the index (with respect to",
"is possible we might # consider a masked span. #",
"# (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1),",
"Find the gold labels for the spans which we kept.",
"device < 0: device = 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)),",
"max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\",",
"= self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is not",
"negative marginal log likelihood of all antecedents which are in",
"2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) #",
"self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list =",
"(num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to (i",
"= torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x initializer(self) def",
"in # the same coreference cluster. if self._pretrain_ed: # All",
"span which survived the pruning stage. This procedure is `generic",
"event_embeddings): # (batch, top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings,",
"matrix, we reformat our variables again to get embeddings #",
"get as input to the model. context_layer : ``Seq2SeqEncoder`` This",
"Average() self._realis_loss_metric = Average() self._coref_loss_metric = Average() self._coref_label_metric = Average()",
"pair of spans to consider. Has shape (batch_size, num_spans_to_keep, max_antecedents,",
"= coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood",
"torch.Tensor]: # pylint: disable=arguments-differ \"\"\" Parameters ---------- text : ``Dict[str,",
"this here to make # the multiple calls to util.batched_index_select",
"metadata : ``List[Dict[str, Any]]``, optional (default = None). A metadata",
"do not appear in any clusters. event_type_labels : ``torch.IntTensor``, optional",
"= (spans[:, :, 0] >= 0).squeeze(-1).float() # SpanFields return -1",
"the pruning stage. antecedent_indices : ``torch.IntTensor`` A tensor of shape",
"model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual information for",
"import node_decode logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class",
"This feedforward network is applied to pairs of span representation,",
"to be optimised. \"\"\" # Shape: (batch_size, document_length, embedding_size) text_embeddings",
"(batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log of the",
"else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim =",
"word distance between the spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask",
"As we do # some comparisons based on span widths",
"and end word indices of the top spans that survived",
"new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return",
"dummy label is included, representing the decision that the span",
"torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import",
"required. Embedding representations of the antecedent spans we are considering",
"the dummy label, the score is always zero. For the",
"blame many of the absent links on bad spans, enabling",
"-1 means there was no predicted link. loss : ``torch.FloatTensor``,",
"Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head",
"indices which we don't want to predict, per span. #",
"The output of a ``TextField`` representing the text of the",
"variables again to get embeddings # for all valid antecedents",
": ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for",
"(start, end) inclusive spans into the original document. \"\"\" return",
"{\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores, } if",
"torch.Tensor]): \"\"\" Converts the list of spans and predicted antecedent",
"self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) # Shape: (1, 1, max_antecedents",
"spans = F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size, document_length, encoding_dim)",
": ``torch.FloatTensor`` The logged mask representing whether each antecedent span",
"this is independent # of the batch dimension - it's",
"need the 0 elements of the mask to be -inf",
"# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which # we can",
"the text of the document. coref_labels : ``torch.IntTensor``, optional (default",
"# (num_spans_to_keep, max_antecedents), # (1, max_antecedents), # (1, num_spans_to_keep, max_antecedents)",
"num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings)",
"a span does not have any antecedent. Parameters ---------- top_span_labels",
"(num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents)",
"the pair of spans to consider. Has shape (batch_size, num_spans_to_keep,",
"mapping a span's # index to the indices of its",
"(1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values],",
"The indices of every antecedent to consider with respect to",
"span. # We're generating a logspace mask here because we",
"imply a clustering of the spans in the document. Parameters",
"we can just use the relative # index of the",
"pruning stage, we consider this many antecedents. lexical_dropout: ``float`` The",
"survived the pruning stage, # a predicted antecedent. This implies",
"the probabilities of all antecedent predictions # that would be",
"= self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m",
"by a linear layer. antecedent_feedforward: ``FeedForward`` This feedforward network is",
"or -1 for those which do not appear in any",
"terms of num_spans_to_keep, we need to # compare span pairs",
"mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\":",
"the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to",
"self._coref_loss_metric(0.) if metadata is not None: output_dict[\"document\"] = [x[\"original_text\"] for",
"text_mask[:, :, None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size)",
"want to mask these, # because these are exactly the",
"same coreference cluster. if self._pretrain_ed: # All antecedent mask is",
"-1) event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :,",
"like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which # we can use",
"Parameters ---------- text : ``Dict[str, torch.LongTensor]``, required. The output of",
"raw_antecedent_indices = target_indices - valid_antecedent_offsets # In our matrix of",
"each word in the document. mention_feedforward : ``FeedForward`` This feedforward",
"allowed antecedents. # Once we have this matrix, we reformat",
"to (i - 1) - j if j <= i,",
"# (batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep,",
"antecedents. This is a valid objective for # clustering as",
"10, attention_type: str = 'dot', decoding: str = 'type-guided', type_threshold:",
"``torch.FloatTensor`` The logged mask representing whether each antecedent span is",
"document. spans : ``torch.IntTensor``, required. A tensor of shape (batch_size,",
"Any]] = None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ \"\"\"",
"in terms of spans we are considering. Has shape (1,",
"-util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"] = coref_loss +",
"many antecedents. lexical_dropout: ``float`` The probability of dropping out dimensions",
"the specific span. realis_labels : ``torch.IntTensor``, optional (default = None).",
"Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, Pruner",
"because index 0 is the \"no antecedent\" class, # so",
"decide each span's antecedent. Each span can only # have",
"if the prediction # is greater than -1. predicted_antecedents -=",
"top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices",
"marginal log likelihood of all antecedents which are in the",
"(batch_size, num_spans_to_keep, max_antecedents, embedding_size), which # we can use to",
"(batch_size, num_spans), representing the cluster ids of each span, or",
"self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask bce_loss = bce_loss.sum() *",
"is construct a matrix mapping a span's # index to",
"pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout",
"0.2, pretrain_ed: bool = False, pretrain_coref: bool = False, coref_loss_weight:",
"= None, event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor = None,",
"# We now have, for each span which survived the",
"= [x[\"token_offset\"] for x in metadata] output_dict['doc_id'] = [x.get(\"doc_id\", None)",
"top_embeddings + (1 - refine_gate) * event_rep return top_embeddings def",
"event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings",
"max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This is",
"= coreference_scores.max(2) # Subtract one here because index 0 is",
"= Average() self._coref_label_metric = Average() self._type_label_metric = Average() self._nil_label_metric =",
"else: self._coref_loss_metric(0.) if metadata is not None: output_dict[\"document\"] = [x[\"original_text\"]",
"embedding representation of the distance between the two spans. Parameters",
"of the mask to be -inf # in order to",
"antecedent\" class, # so this makes the indices line up",
"event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1)",
"for # clustering as we don't mind which antecedent is",
"self._coref_label_metric = Average() self._type_label_metric = Average() self._nil_label_metric = Average() if",
"device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels ==",
"predicted_antecedents -= 1 output_dict = {\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\":",
"# Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size,",
"valid antecedents for each span. This gives us variables with",
"document_length)) (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, )",
"span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores",
"antecedent spans to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings",
"will be > the target indices. We want to mask",
"for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim()",
"target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets =",
"per span. # We're generating a logspace mask here because",
"but here # we need to select spans for each",
"None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim()",
"num_spans_to_keep, max_antecedents + 1), representing the unnormalised score for each",
"\"\"\" Parameters ---------- text : ``Dict[str, torch.LongTensor]``, required. The output",
"the upper triangular part will be negative # because the",
"max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size",
"+ max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The offsets between",
"= util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores =",
"feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings,",
"The resulting coreference links, after applying transitivity, imply a clustering",
"required. Embedding representations of pairs of spans. Has shape (batch_size,",
"event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob = torch.softmax(event_prob, -1) event_rep",
"spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is not None: #",
"device to use. Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The indices",
"context_layer: Seq2SeqEncoder = None, max_span_width: int = 1, spans_per_word: float",
"antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings:",
"= type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer is not None: endpoint_span_extractor_dim",
"every span. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required.",
"(batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required.",
"= self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate * top_embeddings +",
"new_contextualized_embeddings @overrides def forward(self, # type: ignore text: Dict[str, torch.LongTensor],",
"event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param top_event_type_labels: (batch, top_span_size, 1)",
"top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have, for each span",
"# for pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) >",
"in the batch. # This reformats the indices to take",
"decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for",
"List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ",
"# Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets #",
"[x.get(\"doc_id\", None) for x in metadata] return output_dict @overrides def",
"to the same cluster in the gold clustering. Has shape",
"for x in metadata] output_dict['doc_id'] = [x.get(\"doc_id\", None) for x",
"pairs of spans. Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores:",
"coreferent with any previous span, but here we are computing",
"realis_labels: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None) ->",
"torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings if self._attention_layer is",
"document. \"\"\" return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def",
"antecedents, because there are none to select from. Similarly, each",
"coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\"",
"considered. predicted_antecedents : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep)``",
"self._endpoint_span_extractor is not None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else:",
"the batch. # This reformats the indices to take into",
"span widths. max_span_width: ``int`` The maximum width of candidate spans.",
"`indices` of these spans. So, regardless of the batch, the",
"of shape (batch_size, num_spans_to_keep, max_antecedents + 1), representing the unnormalised",
"required. The CUDA device to use. Returns ------- valid_antecedent_indices :",
"= event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings =",
"self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric",
"we group # mentions which refer to each other in",
"dictionary consisting of: top_spans : ``torch.IntTensor`` A tensor of shape",
"-1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores:",
"Additionally, a dummy label is included, representing the decision that",
"spans that survived the pruning stage. antecedent_indices : ``torch.IntTensor`` A",
"Returns ------- An output dictionary consisting of: top_spans : ``torch.IntTensor``",
"attend over the # span representations that we generate from",
"antecedent. The factoring allows the model to blame many of",
"model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will",
"(batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer is",
"attention_type self._decoding = decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if",
"= None, realis_labels: torch.IntTensor = None, metadata: List[Dict[str, Any]] =",
"bool = True, type_match_in_eval: bool = True, initializer: InitializerApplicator =",
"refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate * top_embeddings",
"which is then scored by a linear layer. feature_size: ``int``",
"``int``, required. The maximum number of antecedent spans to consider",
"the indices which we don't want to predict, per span.",
"model described \"End-to-end Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et",
"indicator for every pair of spans. This label is one",
"(i,j)-th index is equal to (i - 1) - j",
"return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor,",
"optimised. \"\"\" # Shape: (batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text))",
"int = 1, spans_per_word: float = 0.1, max_antecedents: int =",
"= event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1)",
"document_length) text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask =",
"src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__)",
"= self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores =",
"num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings",
"2019-09-10 # Mostly by AllenNLP import logging import math from",
"torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores =",
"(default=``None``) If provided, will be used to calculate the regularization",
"text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder",
"required. Mention scores for every antecedent. Has shape (batch_size, num_spans_to_keep,",
"the word distance between the spans). Has shape ``(1, max_antecedents)``.",
"This is a valid objective for # clustering as we",
"final predictions for which spans to consider as mentions. #",
"antecedents in terms of the number of considered spans (i.e",
"Select tensors relating to the antecedent spans. # Shape: (batch_size,",
"the indices to take into account their # index into",
"basic outline of this model is to get an embedded",
"(batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1,",
"the distance between the two spans. Parameters ---------- shape (batch_size,",
"# # # Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels =",
"Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels,",
"that were kept while pruning. max_antecedents : ``int``, required. The",
"log of the mask for valid antecedents. Returns ------- coreference_scores:",
"span representations, and an embedding representation of the distance between",
"this makes the indices line up with actual spans if",
"and its antecedent spans in terms of spans we are",
"in order to not mess up the normalisation of the",
"None, metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: #",
"AllenNLP import logging import math from typing import Any, Dict,",
"# we need to select spans for each element in",
"'type-guided', type_threshold: float = -1., type_refine: bool = True, type_match_in_eval:",
"prior mentions k in the same # coreference cluster that",
"loss is the sum of the # probability assigned to",
"``torch.FloatTensor``, required. The log of the mask for valid antecedents.",
"\"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores, } if coref_labels is not None",
"- pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents",
"\"\"\" event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices),",
"matrix of indices, the upper triangular part will be negative",
"attention_mask = attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:,",
"to consider. This includes both the original span representations, the",
"up the normalisation of the distribution. # Shape: (1, num_spans_to_keep,",
"torch.cat([bucket_values, label_bucket_values], 1) ) # Shape: (1, 1, max_antecedents +",
"* encoding_dim + feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)",
"or zero otherwise. Parameters ---------- num_spans_to_keep : ``int``, required. The",
"bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask bce_loss =",
"not appear in any clusters. event_type_labels : ``torch.IntTensor``, optional (default",
"the realis label of the specific span. metadata : ``List[Dict[str,",
"the 1st span _cannot_ have any antecedents, because there are",
"in # top_spans. The spans are in document order, so",
"(batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size,",
"# Find the gold labels for the spans which we",
"or batch of instances. Returns ------- The same output dictionary,",
"# attention_mask = attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask =",
"all valid antecedents for each span. This gives us variables",
"allennlp.training.metrics import Average from overrides import overrides from torch.nn import",
"span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) #",
"(batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels)",
"= util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood =",
"A binary tensor representing whether a given pair of spans",
"bce_pos_weight: float = None, local_window_size: int = 10, attention_type: str",
"import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average from overrides",
"spans_per_word: float = 0.1, max_antecedents: int = 50, lexical_dropout: float",
"# that would be consistent with the data, in the",
"an embedding representation of pairs of spans for the pairwise",
"keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator)",
"The logged mask representing whether each antecedent span is valid.",
"to all valid antecedents. This is a valid objective for",
"event_type_size + max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) #",
"max_antecedents + event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings *",
"\"no antecedent\" class, # so this makes the indices line",
"this is the case is that each span in a",
"num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) #",
": ``torch.IntTensor``, required. A tensor of shape (batch_size, num_spans, 2),",
"torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets:",
"unlikely to occur in a coreference cluster. For the remaining",
"pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size =",
"consider after the pruning stage is >= the # total",
"if we group # mentions which refer to each other",
"batch can be coreferent with any previous span, but here",
"use the relative # index of the spans to know",
"device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) +",
"event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep,",
"need to # compare span pairs to decide each span's",
"might be several prior mentions k in the same #",
"when we attend over the # span representations that we",
"util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions for which spans to",
"> 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x:",
"multiple calls to util.batched_index_select below more efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices,",
"max_antecedents), where the (i,j)-th index is equal to (i -",
"output_dict: Dict[str, torch.Tensor]): \"\"\" Converts the list of spans and",
"``Dict[str, torch.LongTensor]``, required. The output of a ``TextField`` representing the",
"the first thing we do is construct a matrix mapping",
"pair we considered. \"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask],",
"initializer(self) def _get_event_embedding(self, span_mask): \"\"\" :param span_mask: (batch, top_span_size, 1)",
"attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim()",
"num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape:",
"+ 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),",
"print(pairwise_labels) # # # Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels",
"# have prior spans as antecedents, and we only consider",
"pruning stage is >= the # total number of spans,",
"antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores",
"spans belong to the same cluster in the gold clustering.",
"= self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores. # Shape:",
"= torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the",
"= (top_event_type_labels == event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings):",
"0).float()) * span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight # Now,",
"self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer is not None:",
"the spans in the document. Parameters ---------- vocab : ``Vocabulary``",
"feature_size: ``int`` The embedding size for all the embedded features,",
"indices of candidate spans for mentions. Comes from a ``ListField[SpanField]``",
"antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``. \"\"\" # Shape: (num_spans_to_keep,",
"spans which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels",
"* event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device =",
"element-wise similarity of the span representations, and an embedding representation",
">= 0).squeeze(-1).float() # SpanFields return -1 when they are used",
"TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from",
"endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else:",
"+= top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape: (batch_size,",
"means there was no predicted link. loss : ``torch.FloatTensor``, optional",
"first thing we do is construct a matrix mapping a",
"document should have no valid antecedents. Has shape ``(1, num_spans_to_keep,",
"def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward,",
"util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0]",
"same gold cluster as the span we are currently considering.",
"cluster that would be valid antecedents. Our loss is the",
"of shape (batch_size, num_spans), representing the event label of the",
"Parameters ---------- num_spans_to_keep : ``int``, required. The number of spans",
"- 1 # 10 possible distance buckets. self._num_distance_buckets = 10",
"for antecedent spans to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) #",
"antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The",
"TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric = Average()",
"pair of spans. This label is one if and only",
"torch.cat([dummy_scores, event_prob], -1) event_prob = torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:,",
"the document. mention_feedforward : ``FeedForward`` This feedforward network is applied",
"embedded features, such as distances or span widths. max_span_width: ``int``",
"bool = False) -> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision,",
"(batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size,",
"FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders",
"to the same cluster. The labels are augmented with a",
"self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else:",
"tensor of shape (batch_size, num_spans), representing the event label of",
"negative marginal log-likelihood. # This is equal to the log",
"the (i,j)-th index is equal to (i - 1) -",
"= coref_loss + bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans']",
"torch.FloatTensor: \"\"\" Computes scores for every pair of spans. Additionally,",
"(batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations of",
"self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans = spans.size(1) # Shape: (batch_size,",
"type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels",
"[attended_span_embeddings] if self._endpoint_span_extractor is not None: # Shape: (batch_size, num_spans,",
"each element in the batch. Parameters ---------- output_dict : ``Dict[str,",
"in the gold clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents +",
"maximum width of candidate spans. spans_per_word: float, required. A multiplier",
"------- coreference_scores: ``torch.FloatTensor`` A tensor of shape (batch_size, num_spans_to_keep, max_antecedents",
"= None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer",
"antecedents. Our loss is the sum of the # probability",
"Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels =",
"# event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label) #",
"= False, pretrain_coref: bool = False, coref_loss_weight: float = 1.0,",
"type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels for the",
"to the top k spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets",
"torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def",
"Parameters ---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required.",
"self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric = Average() self._coref_loss_metric",
"have this matrix, we reformat our variables again to get",
"The probability of dropping out dimensions of the embedded text.",
"= logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\" This",
"from allennlp.training.metrics import Average from overrides import overrides from torch.nn",
"make coreference decisions between valid span pairs. # Shapes: #",
"span. Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets",
"self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer",
"of (start, end) inclusive spans into the original document. \"\"\"",
"with. The resulting coreference links, after applying transitivity, imply a",
"the target indices. We want to mask these, # because",
"= torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) +",
"metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: # pylint:",
"so we need the 0 elements of the mask to",
"top_spans) of the possible antecedents the model considered. predicted_antecedents :",
"clustering. Has shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The",
"based on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings,",
"max_antecedents, embedding_size). return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) \"\"\"",
"the document. coref_labels : ``torch.IntTensor``, optional (default = None). A",
"compute the loss using the negative marginal log-likelihood. # This",
"= util.get_device_of(raw_contextualized_embeddings) if device < 0: device = 'cpu' attention_mask",
"features, such as distances or span widths. max_span_width: ``int`` The",
"the # total number of spans, because in this case,",
"output dictionary consisting of: top_spans : ``torch.IntTensor`` A tensor of",
"candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size +",
"predict, per span. # We're generating a logspace mask here",
"import math from typing import Any, Dict, List, Optional, Tuple",
"non_dummy_indicator = (target_labels >= 0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator",
"of all antecedent predictions # that would be consistent with",
"span which survived the pruning stage, # a predicted antecedent.",
"= self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(",
"mention_feedforward : ``FeedForward`` This feedforward network is applied to the",
"metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is",
"Required since different spans have different numbers of valid antecedents.",
"num_spans_to_keep, max_antecedents, embedding_size) \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)",
"tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing the start and",
"shape (batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations",
"max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets # In our matrix",
"= util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels +=",
"self._type_label_metric = Average() self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss =",
"spans) # Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim",
">= 0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels",
"or span widths. max_span_width: ``int`` The maximum width of candidate",
"# Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() if self._context_layer:",
"spans are in document order, so we can just use",
"event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\"",
"each mention which survives the pruning stage, we consider this",
"\"\"\" :param span_mask: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size)",
"antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor,",
"of the span representations, and an embedding representation of the",
"_local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device < 0:",
"top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep) # torch.index_select",
"= pairwise_labels * 0 else: # for pairwise_labels without type_antecedent_labels",
"included, representing the decision that the span is not coreferent",
"linear layer. antecedent_feedforward: ``FeedForward`` This feedforward network is applied to",
"part will be negative # because the offsets will be",
"logging import math from typing import Any, Dict, List, Optional,",
"# # Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels = (1",
"and end indices of candidate spans for mentions. Comes from",
": ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each instance",
"antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self,",
"if context_layer is not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim =",
"torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) else: raw_contextualized_embeddings",
"the two spans. Parameters ---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings",
"the batch dimension - it's just a function of the",
"Has shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The cluster",
"Created by Roger on 2019-09-10 # Mostly by AllenNLP import",
"= top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:] = 0 coreference_log_probs",
"max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss =",
": ``torch.FloatTensor`` Embedding representation of the pair of spans to",
"spans for each element in the batch. # This reformats",
"= local_window_size self._attention_type = attention_type self._decoding = decoding self._type_threshold =",
"keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1)",
"event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) *",
"(batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores =",
"num_spans_to_keep, 2)`` representing the start and end word indices of",
"# (1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1,",
"encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer is not None:",
"valid_antecedent_log_mask) # We now have, for each span which survived",
": ``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for",
"max_antecedents, embedding_size), which # we can use to make coreference",
"output dictionary, but with an additional ``clusters`` key: clusters :",
"the true antecedent spans, the score consists of the pairwise",
"2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size + max_antecedents)",
"= DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type: %s'",
"in edge cases where # the number of spans we",
"is to get an embedded representation of each span in",
"antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1) shape",
"from these indices, we # need them to be <=",
"* num_spans_to_keep) # torch.index_select only accepts 1D indices, but here",
"if j <= i, or zero otherwise. Parameters ---------- num_spans_to_keep",
"Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape:",
"dictionary, but with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int,",
"generate from these indices, we # need them to be",
"that a span does not have any antecedent. Parameters ----------",
"the same cluster in the gold clustering. Has shape (batch_size,",
": ``torch.IntTensor`` The indices of every antecedent to consider with",
"allennlp.models.model import Model from allennlp.modules import FeedForward, Pruner from allennlp.modules",
"= spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores =",
"Any]]``, optional (default = None). A metadata dictionary for each",
"0).squeeze(-1).float() # SpanFields return -1 when they are used as",
"take into account their # index into the batch. We",
"for each instance in the batch, the list of clusters,",
"0: device = 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) #",
"each top span. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). return:",
"does not have any antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``,",
"``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask representing whether",
"return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size, emb_size)",
"max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores",
"of its allowed antecedents. Note that this is independent #",
"the pairwise antecedent score and the unary mention scores for",
"only if the pair of spans belong to the same",
"the model decides which antecedent span (if any) they are",
"we have this matrix, we reformat our variables again to",
"coreference_scores: ``torch.FloatTensor`` A tensor of shape (batch_size, num_spans_to_keep, max_antecedents +",
"id is arbitrary, as we just care about the clustering.",
"span pairs. # Shapes: # (num_spans_to_keep, max_antecedents), # (1, max_antecedents),",
"the pairwise scoring function to consider. This includes both the",
"clustering as we don't mind which antecedent is predicted, so",
"representations of the event types. Has shape (batch_size, event_type_size, embedding_size).",
"not None: # Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings,",
"bad spans, enabling the pruning strategy used in the forward",
"`generic across the batch`. The reason this is the case",
"we are computing the possible `indices` of these spans. So,",
"can just use the relative # index of the spans",
"are used as padding. As we do # some comparisons",
"# probability assigned to all valid antecedents. This is a",
"masked span. # Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long()",
"self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() ) else:",
"pairwise_labels = same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels = pairwise_labels",
"device = util.get_device_of(raw_contextualized_embeddings) if device < 0: device = 'cpu'",
"disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model`` implements the coreference",
"None) for x in metadata] return output_dict @overrides def decode(self,",
"= EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size =",
"# torch.index_select only accepts 1D indices, but here # we",
"IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if self._endpoint_span_extractor is not None:",
"of spans we consider after the pruning stage is >=",
"\"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset),",
"element in the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``,",
"each (span, antecedent) pair we considered. \"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0),",
"be -inf # in order to not mess up the",
"are in document order, so we can just use the",
"respectively have the original text and the annotated gold coreference",
"stage. This procedure is `generic across the batch`. The reason",
"total number of spans, because in this case, it is",
"element can only predict previous spans, so this returns a",
"not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings",
"= None). A tensor of shape (batch_size, num_spans), representing the",
"same cluster in the gold clustering. Has shape (batch_size, num_spans_to_keep,",
"= 50, lexical_dropout: float = 0.2, pretrain_ed: bool = False,",
"1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask))",
"consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() #",
"pruning. max_antecedents : ``int``, required. The maximum number of antecedent",
"of the sum of the probabilities of all antecedent predictions",
"is the case is that each span in a batch",
"between each top span and its antecedent spans in terms",
"the number of words in the document. max_antecedents: int, required.",
"False) -> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1",
"cluster. For the remaining spans, the model decides which antecedent",
"= 0.1, max_antecedents: int = 50, lexical_dropout: float = 0.2,",
"that this is independent # of the batch dimension -",
"= self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have, for",
"[antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep,",
"num_spans, 2) spans = F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size,",
"antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size,",
"for each instance in the batch. We use the \"original_text\"",
"antecedent_labels: torch.IntTensor): \"\"\" Generates a binary indicator for every pair",
"num_spans = spans.size(1) # Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float()",
"# total number of spans, because in this case, it",
": ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters.",
"shape (batch_size, num_spans, 2), representing the inclusive start and end",
"of its antecedents in terms of the number of considered",
"for the pairwise scoring function to consider. This includes both",
"generates possible antecedents per span which survived the pruning stage.",
"but here we are computing the possible `indices` of these",
"(num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets # In our",
"binary indicator for every pair of spans. This label is",
"# so this makes the indices line up with actual",
"self._coref_loss_metric = Average() self._coref_label_metric = Average() self._type_label_metric = Average() self._nil_label_metric",
"each span which survived the pruning stage, # a predicted",
"logged mask representing whether each antecedent span is valid. Required",
"``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer",
"of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the",
"decision that the span is not coreferent with anything. For",
"allows the model to blame many of the absent links",
"= bce_pos_weight self._max_span_width = max_span_width self._spans_per_word = spans_per_word self._max_antecedents =",
"self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item())",
"The labels are augmented with a dummy antecedent at the",
"torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor = None, realis_labels:",
"a # given span, the negative marginal log likelihood of",
"+ self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer = None else:",
"pruning stage. This procedure is `generic across the batch`. The",
"to # compare span pairs to decide each span's antecedent.",
"have, for each span which survived the pruning stage, #",
"the negative marginal log likelihood of all antecedents which are",
"self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1)",
"torch.IntTensor = None, metadata: List[Dict[str, Any]] = None) -> Dict[str,",
"bool = False, pretrain_coref: bool = False, coref_loss_weight: float =",
"implies a clustering if we group # mentions which refer",
"= util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size))",
"document_length = text_embeddings.size(1) num_spans = spans.size(1) # Shape: (batch_size, document_length)",
"antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer(",
"create a # distribution over these indices, so we need",
"number of words in the document. max_antecedents: int, required. For",
"span can only # have prior spans as antecedents, and",
"top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape: (batch_size, num_spans_to_keep,",
"antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute labels.",
"antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size,",
"``torch.IntTensor``, required. The cluster id label for every antecedent span.",
"type_refine: bool = True, type_match_in_eval: bool = True, initializer: InitializerApplicator",
"care about the clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns",
"position in # top_spans. The spans are in document order,",
"the model to blame many of the absent links on",
"gold labels for the spans which we kept. pruned_gold_labels =",
"case, it is possible we might # consider a masked",
"of spans. This label is one if and only if",
"return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset:",
"shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). return: (batch_size, num_spans_to_keep, max_antecedents +",
"self._local_window_size <= 0: self._attention_layer = None else: if self._attention_type ==",
"# Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) #",
"text of the document. spans : ``torch.IntTensor``, required. A tensor",
"self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is not None:",
"``torch.FloatTensor``, required. Embedding representations of the top spans. Has shape",
"util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices for antecedent spans to",
"self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None",
"be optimised. \"\"\" # Shape: (batch_size, document_length, embedding_size) text_embeddings =",
"for each span. This gives us variables with shapes #",
"span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of the pair of spans",
"num_spans_to_keep, max_antecedents + 1). \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents)",
"``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing,",
"pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings:",
"which represents the prediction that a span does not have",
"indices into the text of the document. coref_labels : ``torch.IntTensor``,",
"of valid antecedents. For example, the first span in the",
"applied to the span representations which is then scored by",
"pretrain_coref: bool = False, coref_loss_weight: float = 1.0, bce_loss_weight: float",
"Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores],",
"for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list,",
"so this returns a matrix of shape (num_spans_to_keep, max_antecedents), where",
"1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels))",
"\"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset),",
"= None, local_window_size: int = 10, attention_type: str = 'dot',",
"text_mask) if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings,",
"each antecedent span is valid. Required since different spans have",
"the ``text`` ``TextField`` we get as input to the model.",
"to util.batched_index_select below more efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) #",
"order to not mess up the normalisation of the distribution.",
"= decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer is",
"antecedent_indices : ``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing",
"= text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor =",
": ``List[Dict[str, Any]]``, optional (default = None). A metadata dictionary",
"Mention scores for every antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents).",
"(with respect to antecedent_indices) of the most likely antecedent. -1",
"to not mess up the normalisation of the distribution. #",
"representing for each top span the index (with respect to",
"A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1), representing",
"top k spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor``",
"to the number of words in the document. max_antecedents: int,",
"pairs. # Shapes: # (num_spans_to_keep, max_antecedents), # (1, max_antecedents), #",
"# is greater than -1. predicted_antecedents -= 1 output_dict =",
"is not None: output_dict[\"document\"] = [x[\"original_text\"] for x in metadata]",
"(antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents +",
"a matrix mapping a span's # index to the indices",
"tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span",
"event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1,",
"bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans']",
"------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor representing whether a",
"(with respect to top_spans) of the possible antecedents the model",
"exactly the indices which we don't want to predict, per",
"= util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1)",
"+= [attended_span_embeddings] if self._endpoint_span_extractor is not None: # Shape: (batch_size,",
"# (batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores],",
"A multiplier between zero and one which controls what percentage",
"provided, will be used to calculate the regularization penalty during",
"are computing the possible `indices` of these spans. So, regardless",
"import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions",
"``(batch_size, num_spans_to_keep, 2)`` representing the start and end word indices",
"we need the 0 elements of the mask to be",
"top span and its antecedent spans in terms of spans",
"embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1)",
"Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer",
"id label for every antecedent span. The id is arbitrary,",
"The embedding size for all the embedded features, such as",
"there might be several prior mentions k in the same",
"candidate spans for mentions. Comes from a ``ListField[SpanField]`` of indices",
"antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) #",
"we generate from these indices, we # need them to",
"0: self._attention_layer = None else: if self._attention_type == 'dot': similarity_function",
"Embedding representations of the event types. Has shape (batch_size, event_type_size,",
"# because these are exactly the indices which we don't",
"the original document. \"\"\" return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold)",
"float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return",
"positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool = False) ->",
"event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores. # Shape: (batch_size, num_spans_to_keep,",
"i predicts a # single antecedent j, but there might",
"If provided, will be used to calculate the regularization penalty",
"just care about the clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents).",
"list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor",
"util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute labels. # Shape:",
"to mask these, # because these are exactly the indices",
"``FeedForward`` This feedforward network is applied to the span representations",
"text_embeddings.size(1) num_spans = spans.size(1) # Shape: (batch_size, document_length) text_mask =",
"shape (batch_size, num_spans), representing the realis label of the specific",
"label is included, representing the decision that the span is",
"spans) # Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)",
"which antecedent span (if any) they are coreferent with. The",
"similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type:",
"top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float() return type_antecedent_labels",
"= None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder =",
"1 # 10 possible distance buckets. self._num_distance_buckets = 10 self._distance_embedding",
"shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for",
"spans have different numbers of valid antecedents. For example, the",
"a masked span. # Shape: (batch_size, num_spans, 2) spans =",
"specific span. metadata : ``List[Dict[str, Any]]``, optional (default = None).",
"tensors relating to the antecedent spans. # Shape: (batch_size, num_spans_to_keep,",
"else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size",
"/ 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings:",
"know which other spans are allowed antecedents. # Once we",
"candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores =",
"embedding_size + 2 * encoding_dim + feature_size) # span_embeddings =",
"coref_loss + bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list",
"import Model from allennlp.modules import FeedForward, Pruner from allennlp.modules import",
"if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask",
"self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool =",
"pruning stage. antecedent_indices : ``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep,",
"2017. The basic outline of this model is to get",
"chain. # Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) #",
"_generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]:",
"max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values",
"So the first thing we do is construct a matrix",
"(batch_size, num_spans_to_keep, 1) # dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)",
"kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1)",
"one which controls what percentage of candidate mention spans we",
"float, required. A multiplier between zero and one which controls",
"which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels =",
"raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list()",
"for each top span the index (with respect to top_spans)",
"event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param top_event_type_labels:",
"self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim =",
"else: self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <=",
"these are exactly the indices which we don't want to",
"top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate *",
"embedding representation of pairs of spans for the pairwise scoring",
"are currently considering. Each span i predicts a # single",
"# span representations that we generate from these indices, we",
"word indices of the top spans that survived the pruning",
"sum of the # probability assigned to all valid antecedents.",
"the remaining spans, the model decides which antecedent span (if",
"don't mind which antecedent is predicted, so long as they",
"cluster in the gold clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents",
"embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans = spans.size(1)",
"antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device:",
"mask representing whether each antecedent span is valid. Required since",
"from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from",
"# Once we have this matrix, we reformat our variables",
"two spans. Parameters ---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings :",
"relating to the antecedent spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents,",
"is then scored by a linear layer. antecedent_feedforward: ``FeedForward`` This",
"along with any pairwise features, which is then scored by",
"that we are minimising, for a # given span, the",
"event_prob[:, :, :1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1))",
"= TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed",
"Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1,",
"{\"c_p\": coref_precision, \"c_r\": coref_recall, \"c_f1\": coref_f1, \"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'],",
"(i.e not the word distance between the spans). Has shape",
"(1, max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of",
"\"\"\" # Shape: (batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length",
"from typing import Any, Dict, List, Optional, Tuple import torch",
"= EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor = None self._attentive_span_extractor",
"1, 2)) shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape)",
"for each top span. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size).",
"(top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings",
"type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask",
"return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int) ->",
"type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size, emb_size) bmm",
"event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings",
"raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size,",
"# This is a broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents)",
"inclusive spans into the original document. \"\"\" return node_decode(output_dict, self.vocab,",
"different spans have different numbers of valid antecedents. For example,",
"regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._antecedent_feedforward =",
"= torch.cat([dummy_scores, event_prob], -1) event_prob = torch.softmax(event_prob, -1) event_rep =",
"Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores",
"with any previous span, but here we are computing the",
"num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for every antecedent.",
"\"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset),",
"information for each word in the document. mention_feedforward : ``FeedForward``",
"which do not appear in any clusters. event_type_labels : ``torch.IntTensor``,",
"scored and used to prune away spans that are unlikely",
"text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)",
"that we generate from these indices, we # need them",
"top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'),",
"consider a masked span. # Shape: (batch_size, num_spans, 2) spans",
"self._positive_label_size + 2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs",
"Comes from a ``ListField[SpanField]`` of indices into the text of",
"coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int) -> Tuple[torch.IntTensor,",
"# (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len,",
"(util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This is a broadcasted subtraction.",
"target_indices - valid_antecedent_offsets # In our matrix of indices, the",
"Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() *",
"span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\"",
"else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,",
"to max_antecedents # prior spans. So the first thing we",
"terms of the number of considered spans (i.e not the",
"was no predicted link. loss : ``torch.FloatTensor``, optional A scalar",
"(if any) they are coreferent with. The resulting coreference links,",
"a list of (start, end) inclusive spans into the original",
"Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The indices of every antecedent",
"max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average()",
"from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils",
"document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer is not",
"zero and one which controls what percentage of candidate mention",
"# Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) # Subtract",
"max_antecedents + event_type_size, embedding_size) \"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1),",
"return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) \"\"\" event_embeddings =",
"_get_event_embedding(self, span_mask): \"\"\" :param span_mask: (batch, top_span_size, 1) :return: (batch,",
"top_spans : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep, 2)``",
":param span_mask: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\"",
"1) ) # Shape: (1, 1, max_antecedents + event_type_size, embedding_size)",
"def _get_event_embedding(self, span_mask): \"\"\" :param span_mask: (batch, top_span_size, 1) :return:",
"= top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels",
"number of antecedent spans to consider for every span. device:",
"self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def",
"scored by a linear layer. feature_size: ``int`` The embedding size",
"pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels *",
"+ 1), representing the unormalised score for each (span, antecedent)",
"to occur in a coreference cluster. For the remaining spans,",
"i, or zero otherwise. Parameters ---------- num_spans_to_keep : ``int``, required.",
"subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets",
"event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0),",
"of the top spans that survived the pruning stage. antecedent_indices",
"from allennlp.modules import FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed,",
"top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask",
"to be <= 0. This is only relevant in edge",
"self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if",
"index 0 is the \"no antecedent\" class, # so this",
"encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size,",
"representing the realis label of the specific span. metadata :",
"on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask,",
"is not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor",
"relative # index of the spans to know which other",
"event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding representations of the",
"consider for every span. device: ``int``, required. The CUDA device",
"= decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list,",
"10 possible distance buckets. self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets,",
"its antecedent spans in terms of spans we are considering.",
"spans we are considering. Has shape (1, max_antecedents). Returns -------",
"max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of the",
"max_antecedents + 1), representing the unormalised score for each (span,",
"valid antecedents. Our loss is the sum of the #",
"only # have prior spans as antecedents, and we only",
"= text_embeddings if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention(",
"shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the index",
"== antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float() pairwise_labels = same_cluster_indicator",
"compare span pairs to decide each span's antecedent. Each span",
"num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep,",
"sense that we are minimising, for a # given span,",
"torch.LongTensor]``, required. The output of a ``TextField`` representing the text",
":, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item()) # print(pairwise_labels)",
"``torch.FloatTensor``, optional A scalar loss to be optimised. \"\"\" #",
"list, representing, for each instance in the batch, the list",
"required. The result of calling :func:`forward` on an instance or",
"then scored by a linear layer. feature_size: ``int`` The embedding",
"util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices",
"\"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)])",
"span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float() # SpanFields return",
"event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label) # Shape:",
"dimension - it's just a function of the span's position",
"The same output dictionary, but with an additional ``clusters`` key:",
"prediction that a span does not have any antecedent. Parameters",
"score and the unary mention scores for the span and",
"metadata dictionary for each instance in the batch. We use",
"type_match_in_eval: bool = True, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator]",
"0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long() return valid_antecedent_indices,",
"with any pairwise features, which is then scored by a",
"# Shape: (batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices)",
"tensor of shape (batch_size, num_spans), representing the cluster ids of",
"spans. Parameters ---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``,",
"for every span. The id is arbitrary, as we just",
"torch.FloatTensor]: \"\"\" This method generates possible antecedents per span which",
"= self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings =",
"mention which survives the pruning stage, we consider this many",
"a # distribution over these indices, so we need the",
"shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores",
"representations, and an embedding representation of the distance between the",
"1).unsqueeze(0) # This is a broadcasted subtraction. # Shape: (num_spans_to_keep,",
"valid. Required since different spans have different numbers of valid",
"metadata] output_dict[\"offset\"] = [x[\"token_offset\"] for x in metadata] output_dict['doc_id'] =",
"and only if the pair of spans belong to the",
"self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric",
"num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based on mention",
"the indices of its allowed antecedents. Note that this is",
"Roger on 2019-09-10 # Mostly by AllenNLP import logging import",
"A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top",
"Average() self._coref_label_metric = Average() self._type_label_metric = Average() self._nil_label_metric = Average()",
"we might # consider a masked span. # Shape: (batch_size,",
"antecedent j, but there might be several prior mentions k",
"* event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1),",
"eventually create a # distribution over these indices, so we",
"util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average from overrides import",
"of each span, or -1 for those which do not",
"a valid objective for # clustering as we don't mind",
"# SpanFields return -1 when they are used as padding.",
"return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor,",
"consisting of: top_spans : ``torch.IntTensor`` A tensor of shape ``(batch_size,",
"num_spans) # Compute final predictions for which spans to consider",
"Note that this is independent # of the batch dimension",
"(batch_size, num_spans_to_keep, event_type_size + max_antecedents) candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1)",
"pruning stage, # a predicted antecedent. This implies a clustering",
"This ``Model`` implements the coreference resolution model described \"End-to-end Neural",
"event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings)",
"valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate",
"+ antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1)",
"span. device: ``int``, required. The CUDA device to use. Returns",
"span. realis_labels : ``torch.IntTensor``, optional (default = None). A tensor",
"loss : ``torch.FloatTensor``, optional A scalar loss to be optimised.",
"_compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) ->",
"_get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param top_event_type_labels: (batch, top_span_size, 1) :return: (batch,",
"span's # index to the indices of its allowed antecedents.",
"1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer =",
"---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed",
"(batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings]",
"coreference cluster. For the remaining spans, the model decides which",
"more efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions",
"where # the number of spans we consider after the",
"context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))",
"util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels",
"1] dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob",
"loss to be optimised. \"\"\" # Shape: (batch_size, document_length, embedding_size)",
"(default = None). A tensor of shape (batch_size, num_spans), representing",
"num_attention_heads=num_head ) if self._endpoint_span_extractor is not None: span_embedding_size = self._attentive_span_extractor.get_output_dim()",
"respect to top_spans) of the possible antecedents the model considered.",
"span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer",
"cluster ids of each span, or -1 for those which",
"output_dict['doc_id'] = [x.get(\"doc_id\", None) for x in metadata] return output_dict",
"Embedding representations of the top spans. Has shape (batch_size, num_spans_to_keep,",
"raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device < 0: device",
"pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m in metadata]",
"optional A scalar loss to be optimised. \"\"\" # Shape:",
"equal to the log of the sum of the probabilities",
"is `generic across the batch`. The reason this is the",
"class End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model`` implements the coreference resolution model",
"indices of the top spans that survived the pruning stage.",
"shape ``(1, num_spans_to_keep, max_antecedents)``. \"\"\" # Shape: (num_spans_to_keep, 1) target_indices",
"spans. Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required.",
"shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape:",
"of pairs of spans. Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim)",
"Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings,",
"about the clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns -------",
"False, coref_loss_weight: float = 1.0, bce_loss_weight: float = 1.0, bce_pos_weight:",
"top span, the index (with respect to antecedent_indices) of the",
"by Roger on 2019-09-10 # Mostly by AllenNLP import logging",
"embeddings # for all valid antecedents for each span. This",
"antecedent span. The id is arbitrary, as we just care",
"coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int,",
"text of the document. coref_labels : ``torch.IntTensor``, optional (default =",
"have the original text and the annotated gold coreference clusters",
"0 elements of the mask to be -inf # in",
"which controls what percentage of candidate mention spans we retain",
"= attention_type self._decoding = decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0, \"labels\"))",
"torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor =",
"currently considering. Each span i predicts a # single antecedent",
"top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for every span. Has shape",
"this dictionary, which respectively have the original text and the",
"them to be <= 0. This is only relevant in",
"device) + 1).unsqueeze(0) # This is a broadcasted subtraction. #",
"float = 0.1, max_antecedents: int = 50, lexical_dropout: float =",
"the document. spans : ``torch.IntTensor``, required. A tensor of shape",
"j, but there might be several prior mentions k in",
"max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if",
"just a function of the span's position in # top_spans.",
"= self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels for the spans",
"the sense that we are minimising, for a # given",
"our matrix of indices, the upper triangular part will be",
"to the log of the sum of the probabilities of",
"index into the batch. We precompute this here to make",
": ``Dict[str, torch.LongTensor]``, required. The output of a ``TextField`` representing",
"to pairs of span representation, along with any pairwise features,",
"index (with respect to antecedent_indices) of the most likely antecedent.",
"1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings refine_gate =",
"antecedent. This implies a clustering if we group # mentions",
"cases where # the number of spans we consider after",
"pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels",
"output_dict : ``Dict[str, torch.Tensor]``, required. The result of calling :func:`forward`",
"instances. Returns ------- The same output dictionary, but with an",
"assigned to all valid antecedents. This is a valid objective",
"TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type = attention_type self._decoding =",
"NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2',",
"antecedent_log_mask: ``torch.FloatTensor``, required. The log of the mask for valid",
"self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels for the spans which",
"The reason this is the case is that each span",
"Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of the pair",
"num_spans), representing the cluster ids of each span, or -1",
"<= i, or zero otherwise. Parameters ---------- num_spans_to_keep : ``int``,",
"torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x initializer(self) def _get_event_embedding(self,",
"``(num_spans_to_keep, max_antecedents)`` representing for each top span the index (with",
"None). A metadata dictionary for each instance in the batch.",
"antecedent. -1 means there was no predicted link. loss :",
":, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings refine_gate",
"= [m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'],",
"first span in the document should have no valid antecedents.",
"max_antecedents), # (1, max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets,",
"buckets. self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight =",
"should have no valid antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``.",
"resolution model described \"End-to-end Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee",
"event_prob], -1) event_prob = torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :,",
"combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() +",
": ``torch.IntTensor``, optional (default = None). A tensor of shape",
"@overrides def get_metrics(self, reset: bool = False) -> Dict[str, float]:",
"line up with actual spans if the prediction # is",
"= self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings =",
"in the document. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder :",
"for x in metadata] output_dict[\"offset\"] = [x[\"token_offset\"] for x in",
"j if j <= i, or zero otherwise. Parameters ----------",
"if self._context_layer: # Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings,",
"import overrides from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores",
"we consider this many antecedents. lexical_dropout: ``int`` The probability of",
"to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``)",
"= (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) # This is a broadcasted",
"bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width self._spans_per_word = spans_per_word",
"one here because index 0 is the \"no antecedent\" class,",
"= Average() self._type_label_metric = Average() self._nil_label_metric = Average() if self._bce_pos_weight:",
"representing the start and end word indices of the top",
"# This is equal to the log of the sum",
"[endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings) #",
"+ coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor):",
"num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)",
"(batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for",
"event_embeddings) # Select tensors relating to the antecedent spans. #",
"torch.IntTensor, torch.FloatTensor]: \"\"\" This method generates possible antecedents per span",
"``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each",
"torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold",
"* document_length)) (top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len,",
"# Subtract one here because index 0 is the \"no",
"num_spans_to_keep : ``int``, required. The number of spans that were",
"valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask representing whether each antecedent",
"(batch_size, num_spans, 2) spans = F.relu(spans.float()).long() if self._context_layer: # Shape:",
"as distances or span widths. max_span_width: ``int`` The maximum width",
"antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents",
"links, after applying transitivity, imply a clustering of the spans",
"the top spans that survived the pruning stage. antecedent_indices :",
"and used to prune away spans that are unlikely to",
"top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating to the",
"+ 1).unsqueeze(0) # This is a broadcasted subtraction. # Shape:",
"(batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels,",
"in the document. max_antecedents: int, required. For each mention which",
"top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: \"\"\" Computes",
"# Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long() return valid_antecedent_indices, valid_antecedent_offsets,",
"(batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of",
"import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward,",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores",
"forward(self, # type: ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels:",
"num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute indices",
"span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate = None # NIL for",
"clustering of the spans in the document. Parameters ---------- vocab",
"def _local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device <",
"function to consider. This includes both the original span representations,",
"clusters, which are in turn comprised of a list of",
"is >= the # total number of spans, because in",
"self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\":",
"The distance between the span and each of its antecedents",
"because these are exactly the indices which we don't want",
"self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] + coref_f1) /",
"pretrain_ed self._pretrain_coref = pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(),",
"encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for every span. Has",
"bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values = bucket_values.new_zeros((1,",
"attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask = attention_mask -",
"self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def",
"antecedent_scores += antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1) shape =",
"indices, so we need the 0 elements of the mask",
"text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we",
"spans that were kept while pruning. max_antecedents : ``int``, required.",
"to the indices of its allowed antecedents. Note that this",
"we do # some comparisons based on span widths when",
"is equal to the log of the sum of the",
"embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size",
"max_antecedents + 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores",
"* self._coref_loss_weight output_dict[\"loss\"] = coref_loss + bce_loss decoded_result = self.decode(output_dict)",
"``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the ``text`` ``TextField``",
"util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) #",
"lexical_dropout: float = 0.2, pretrain_ed: bool = False, pretrain_coref: bool",
"end word indices of the top spans that survived the",
"span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores =",
"over the # span representations that we generate from these",
"for every antecedent span. The id is arbitrary, as we",
"this case, it is possible we might # consider a",
"None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors relating to",
"= {\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores, }",
"output_dict[\"offset\"] = [x[\"token_offset\"] for x in metadata] output_dict['doc_id'] = [x.get(\"doc_id\",",
"self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def forward(self, # type: ignore",
"None). A tensor of shape (batch_size, num_spans), representing the cluster",
"span in the document should have no valid antecedents. Has",
"account their # index into the batch. We precompute this",
"spans and predicted antecedent indices into clusters of spans for",
"top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\" Computes an embedding",
"to prune away spans that are unlikely to occur in",
"coref_recall, \"c_f1\": coref_f1, \"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\":",
"of every antecedent to consider with respect to the top",
"Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices - valid_antecedent_offsets # In",
"of indices, the upper triangular part will be negative #",
"they are used as padding. As we do # some",
"spans for each element in the batch. Parameters ---------- output_dict",
"1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape =",
"objective for # clustering as we don't mind which antecedent",
"span representation, along with any pairwise features, which is then",
"of the top spans. Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings",
"coreference links, after applying transitivity, imply a clustering of the",
"Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to",
"metadata] output_dict['doc_id'] = [x.get(\"doc_id\", None) for x in metadata] return",
"None, max_span_width: int = 1, spans_per_word: float = 0.1, max_antecedents:",
"# Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents) coreference_scores",
"Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from allennlp.modules.seq2seq_encoders import",
"that are unlikely to occur in a coreference cluster. For",
"pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size +",
"event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1]",
"we don't want to predict, per span. # We're generating",
"metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item())",
"max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now",
"(raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()",
"these indices, we # need them to be <= 0.",
"(batch_size, num_spans_to_keep, 2) top_spans = util.batched_index_select(spans, top_indices, flat_top_span_indices) # Compute",
"none to select from. Similarly, each element can only predict",
"of this model is to get an embedded representation of",
"This includes both the original span representations, the element-wise similarity",
"+ self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate =",
"num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents)",
"score is always zero. For the true antecedent spans, the",
"A tensor of shape (batch_size, num_spans), representing the event label",
"antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask # Shape:",
"makes the indices line up with actual spans if the",
"not have any antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``, required.",
"because we will eventually create a # distribution over these",
"below more efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final",
"document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans =",
"clusters of spans for each element in the batch. Parameters",
"original text and the annotated gold coreference clusters for that",
"is included, representing the decision that the span is not",
"max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label",
"+ max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute",
") self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if",
"coreference decisions between valid span pairs. # Shapes: # (num_spans_to_keep,",
"output_dict[\"document\"] = [x[\"original_text\"] for x in metadata] output_dict[\"offset\"] = [x[\"token_offset\"]",
"``(batch_size, num_spans_to_keep)`` representing, for each top span, the index (with",
"= top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() # Now that we",
":, 0] >= 0).squeeze(-1).float() # SpanFields return -1 when they",
"considering. Has shape (1, max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor``",
"None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder",
"be negative # because the offsets will be > the",
"disable=arguments-differ \"\"\" Parameters ---------- text : ``Dict[str, torch.LongTensor]``, required. The",
"int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\" This method",
"used to calculate the regularization penalty during training. \"\"\" def",
"return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if",
"= EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric = Average() self._coref_loss_metric =",
"zero otherwise. Parameters ---------- num_spans_to_keep : ``int``, required. The number",
"= Average() self._coref_loss_metric = Average() self._coref_label_metric = Average() self._type_label_metric =",
"document. These span representations are scored and used to prune",
"refine_gate * top_embeddings + (1 - refine_gate) * event_rep return",
"self._local_window_size = local_window_size self._attention_type = attention_type self._decoding = decoding self._type_threshold",
"number of spans, because in this case, it is possible",
"# We're generating a logspace mask here because we will",
"required. A tensor of shape (batch_size, num_spans, 2), representing the",
"of the batch, the 1st span _cannot_ have any antecedents,",
"which respectively have the original text and the annotated gold",
"consistent with the data, in the sense that we are",
"scoring function to consider. This includes both the original span",
"coreference_scores, } if coref_labels is not None and event_type_labels is",
"# dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size,",
"from this dictionary, which respectively have the original text and",
"``TextFieldEmbedder`` Used to embed the ``text`` ``TextField`` we get as",
"Shapes: # (num_spans_to_keep, max_antecedents), # (1, max_antecedents), # (1, num_spans_to_keep,",
"torch.Tensor]``, required. The result of calling :func:`forward` on an instance",
"type_threshold=self._type_threshold) @overrides def get_metrics(self, reset: bool = False) -> Dict[str,",
"valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices",
"``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance between the span",
"label for every span. The id is arbitrary, as we",
"x in metadata] return output_dict @overrides def decode(self, output_dict: Dict[str,",
"We now have, for each span which survived the pruning",
"top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:] = 0",
"Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0) #",
"keys from this dictionary, which respectively have the original text",
"self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else:",
"mess up the normalisation of the distribution. # Shape: (1,",
"typing import Any, Dict, List, Optional, Tuple import torch import",
"(batch_size, num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based",
"coref_f1, \"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\":",
"= 10, attention_type: str = 'dot', decoding: str = 'type-guided',",
"(batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores",
"allennlp.modules import FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder",
"gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels >",
"be coreferent with any previous span, but here we are",
"are in # the same coreference cluster. if self._pretrain_ed: #",
"is applied to pairs of span representation, along with any",
"event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param",
"with actual spans if the prediction # is greater than",
"input to the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates",
"for every pair of spans. Additionally, a dummy label is",
"cluster. if self._pretrain_ed: # All antecedent mask is 0 top_mask",
"the document. These span representations are scored and used to",
"top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() # Now that we have",
"is not None and event_type_labels is not None: pruned_event_type_labels =",
"penalty during training. \"\"\" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder,",
"= True, type_match_in_eval: bool = True, initializer: InitializerApplicator = InitializerApplicator(),",
"to calculate the regularization penalty during training. \"\"\" def __init__(self,",
"embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels')",
"instance in the batch. We use the \"original_text\" and \"clusters\"",
"spans, the score consists of the pairwise antecedent score and",
"spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,",
"None, realis_labels: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None)",
"the element-wise similarity of the span representations, and an embedding",
"result of calling :func:`forward` on an instance or batch of",
":, :1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings",
"scores. # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings",
"event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores",
"Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type)",
"(1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values",
"math from typing import Any, Dict, List, Optional, Tuple import",
"for each top span, the index (with respect to antecedent_indices)",
"the indices line up with actual spans if the prediction",
"the span representations, and an embedding representation of the distance",
"get embeddings # for all valid antecedents for each span.",
"that each span in a batch can be coreferent with",
"parameters. regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be",
"# 10 possible distance buckets. self._num_distance_buckets = 10 self._distance_embedding =",
"TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder =",
"all antecedents which are in the # same gold cluster",
"logger.info(vocab.get_token_from_index(0, \"labels\")) if context_layer is not None: endpoint_span_extractor_dim = context_layer.get_output_dim()",
"= event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob = torch.softmax(event_prob,",
"TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed =",
"a broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices",
"This procedure is `generic across the batch`. The reason this",
"torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1 #",
"any pairwise features, which is then scored by a linear",
"here to make # the multiple calls to util.batched_index_select below",
"each instance in the batch, the list of clusters, which",
"same cluster. The labels are augmented with a dummy antecedent",
"top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask)",
"maximum number of antecedent spans to consider for every span.",
") else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings =",
"endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\",",
"required. Mention scores for every span. Has shape (batch_size, num_spans_to_keep,",
"used as padding. As we do # some comparisons based",
"each span in the document. These span representations are scored",
"``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for each instance in",
"num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor",
"Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of pairs of",
"previous spans, so this returns a matrix of shape (num_spans_to_keep,",
"used to prune away spans that are unlikely to occur",
"each top span, the index (with respect to antecedent_indices) of",
"if coref_labels is not None and event_type_labels is not None:",
"+ 2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs =",
"event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), )",
"each span's antecedent. Each span can only # have prior",
"is one if and only if the pair of spans",
"top_span_labels : ``torch.IntTensor``, required. The cluster id label for every",
"of each span in the document. These span representations are",
"decode(self, output_dict: Dict[str, torch.Tensor]): \"\"\" Converts the list of spans",
"valid_antecedent_log_mask.long() # Compute labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents +",
"dictionary for each instance in the batch. We use the",
"allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from allennlp.nn",
"of the distance between the two spans. Parameters ---------- shape",
") return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels): \"\"\" :param top_event_type_labels: (batch,",
"since different spans have different numbers of valid antecedents. For",
"Tuple import torch import torch.nn.functional as F from allennlp.data import",
"This feedforward network is applied to the span representations which",
"the pruning stage, we consider this many antecedents. lexical_dropout: ``int``",
"self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision, \"c_r\": coref_recall, \"c_f1\": coref_f1, \"m_p\": mention_result['precision'],",
"util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)",
"antecedent_feedforward: ``FeedForward`` This feedforward network is applied to pairs of",
"we consider after the pruning stage is >= the #",
"\"coreference_scores\": coreference_scores, } if coref_labels is not None and event_type_labels",
"We precompute this here to make # the multiple calls",
"we are considering. Has shape (1, max_antecedents). Returns ------- span_pair_embeddings",
"triangular part will be negative # because the offsets will",
"num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention scores for every",
"prediction # is greater than -1. predicted_antecedents -= 1 output_dict",
"torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref =",
"if and only if the pair of spans belong to",
"of the most likely antecedent. -1 means there was no",
"self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') -",
"= lambda x: x initializer(self) def _get_event_embedding(self, span_mask): \"\"\" :param",
"shape (batch_size, num_spans), representing the cluster ids of each span,",
"(batch_size, num_spans, num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores",
"[x[\"original_text\"] for x in metadata] output_dict[\"offset\"] = [x[\"token_offset\"] for x",
"Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight =",
"(event_type_labels > 0).float()) * span_mask bce_loss = bce_loss.sum() * self._bce_loss_weight",
"new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings",
"Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer:",
"# print(pairwise_labels) # # # Shape: (batch_size, num_spans_to_keep, 1) #",
"\"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings],",
"the inclusive start and end indices of candidate spans for",
"to each other in a chain. # Shape: (batch_size, num_spans_to_keep)",
"bool = False, coref_loss_weight: float = 1.0, bce_loss_weight: float =",
"are augmented with a dummy antecedent at the zeroth position,",
"case is that each span in a batch can be",
"Average from overrides import overrides from torch.nn import BCEWithLogitsLoss from",
"float = None, local_window_size: int = 10, attention_type: str =",
"num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size,",
"x in metadata] output_dict['doc_id'] = [x.get(\"doc_id\", None) for x in",
"might # consider a masked span. # Shape: (batch_size, num_spans,",
"* attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings,",
"candidate spans. spans_per_word: float, required. A multiplier between zero and",
"is not coreferent with anything. For the dummy label, the",
"% self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) if",
"event_type_labels : ``torch.IntTensor``, optional (default = None). A tensor of",
"but there might be several prior mentions k in the",
"the spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The",
"# prior spans. So the first thing we do is",
"device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None] * attention_mask new_attention_mask =",
"None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,",
"probability assigned to all valid antecedents. This is a valid",
"Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune",
"as input to the model. context_layer : ``Seq2SeqEncoder`` This layer",
"# device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None] * attention_mask new_attention_mask",
"= negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"] = coref_loss + bce_loss decoded_result",
"num_spans_to_keep, max_antecedents, embedding_size), which # we can use to make",
"get an embedded representation of each span in the document.",
"self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate",
"antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1),",
"valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\": coreference_scores, } if coref_labels is not",
"Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets :",
"def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``, required. Embedding",
"antecedent spans in terms of spans we are considering. Has",
"+ event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) )",
"top span. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). return: (batch_size,",
"allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders",
"just use the relative # index of the spans to",
": ``Seq2SeqEncoder`` This layer incorporates contextual information for each word",
"# Shape: (batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length =",
"arbitrary, as we just care about the clustering. Has shape",
"# same gold cluster as the span we are currently",
"text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size, num_spans,",
"torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: \"\"\"",
"embedding_size), which # we can use to make coreference decisions",
"False, pretrain_coref: bool = False, coref_loss_weight: float = 1.0, bce_loss_weight:",
"# Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list",
"# Mostly by AllenNLP import logging import math from typing",
"regardless of the batch, the 1st span _cannot_ have any",
"dummy antecedent at the zeroth position, which represents the prediction",
"raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function,",
"instance in the batch, the list of clusters, which are",
"pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores: torch.FloatTensor, antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask:",
"its antecedents in terms of the number of considered spans",
"the loss using the negative marginal log-likelihood. # This is",
"x in metadata] output_dict[\"offset\"] = [x[\"token_offset\"] for x in metadata]",
"top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep) # torch.index_select only accepts",
"we have our variables in terms of num_spans_to_keep, we need",
"matrix of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is",
"device=device) # attention_mask = attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask",
"optional (default = None). A tensor of shape (batch_size, num_spans),",
"the possible antecedents the model considered. predicted_antecedents : ``torch.IntTensor`` A",
"Now that we have our variables in terms of num_spans_to_keep,",
"text_field_embedder self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential(",
"is not None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels =",
"to blame many of the absent links on bad spans,",
"== 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise",
"1 output_dict = {\"top_spans\": top_spans, \"antecedent_indices\": valid_antecedent_indices, \"predicted_antecedents\": predicted_antecedents, \"coreference_scores\":",
"cluster. The labels are augmented with a dummy antecedent at",
"(batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for every",
"spans, the model decides which antecedent span (if any) they",
"coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor",
"EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger",
"is only relevant in edge cases where # the number",
"Compute labels. # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels",
"spans for mentions. Comes from a ``ListField[SpanField]`` of indices into",
"else: self._lexical_dropout = lambda x: x initializer(self) def _get_event_embedding(self, span_mask):",
"text_embeddings if self._attention_layer is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings,",
"self._max_span_width = max_span_width self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score",
"the unormalised score for each (span, antecedent) pair we considered.",
"to make coreference decisions between valid span pairs. # Shapes:",
"torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int,",
"class, # so this makes the indices line up with",
"event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size *",
"So, regardless of the batch, the 1st span _cannot_ have",
"spans. So the first thing we do is construct a",
"(batch_size, num_spans_to_keep, max_antecedents + 1), representing the unormalised score for",
"Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2) # Subtract one",
"event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings",
"``ListField[SpanField]`` of indices into the text of the document. coref_labels",
"correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss =",
"2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``,",
"self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim())",
"span. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size). return: (batch_size, num_spans_to_keep,",
"= self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer =",
"Similarly, each element can only predict previous spans, so this",
"stage, # a predicted antecedent. This implies a clustering if",
"# of the batch dimension - it's just a function",
"predictions # that would be consistent with the data, in",
"self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list = [m['gold_label_spans'] for m in",
"shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span the index",
"on an instance or batch of instances. Returns ------- The",
"we reformat our variables again to get embeddings # for",
"from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger =",
"python # -*- coding:utf-8 -*- # Created by Roger on",
"if self._endpoint_span_extractor is not None: span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim()",
"in a batch can be coreferent with any previous span,",
"every span. device: ``int``, required. The CUDA device to use.",
"max_antecedents: int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\" This",
"consider with respect to the top k spans. Has shape",
"The spans are in document order, so we can just",
"the same coreference cluster. if self._pretrain_ed: # All antecedent mask",
"self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores",
"for the spans which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices,",
"vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int,",
"(batch, top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1",
"considering for each top span. Has shape (batch_size, num_spans_to_keep, event_type_size",
"str = 'type-guided', type_threshold: float = -1., type_refine: bool =",
"InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver,",
"num_spans, num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label) # event_realis_scores =",
"event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size +",
"span i predicts a # single antecedent j, but there",
"@overrides def decode(self, output_dict: Dict[str, torch.Tensor]): \"\"\" Converts the list",
"self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type = attention_type",
"None: output_dict[\"document\"] = [x[\"original_text\"] for x in metadata] output_dict[\"offset\"] =",
">= the # total number of spans, because in this",
"between the spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor``",
"+ event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask)",
"representation of the distance between the two spans. Parameters ----------",
"spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance",
"these spans. So, regardless of the batch, the 1st span",
"sum of the probabilities of all antecedent predictions # that",
"and the unary mention scores for the span and its",
"# -*- coding:utf-8 -*- # Created by Roger on 2019-09-10",
"the # probability assigned to all valid antecedents. This is",
"-1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int)",
"None, local_window_size: int = 10, attention_type: str = 'dot', decoding:",
"variables with shapes # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which",
"spans to consider. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) \"\"\"",
"after applying transitivity, imply a clustering of the spans in",
"= self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision,",
"= None # NIL for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'),",
"which we don't want to predict, per span. # We're",
"expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep,",
"if self._endpoint_span_extractor is not None: # Shape: (batch_size, num_spans, embedding_size)",
"they are in # the same coreference cluster. if self._pretrain_ed:",
"valid_antecedent_offsets, valid_antecedent_log_mask = \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is",
"coref_labels is not None and event_type_labels is not None: pruned_event_type_labels",
"type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() )",
"no valid antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``. \"\"\" #",
"the clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label",
"text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor =",
"self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0: self._attention_layer = None",
"max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size,",
"top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\" Generates a binary",
"attended_span_embeddings], -1) else: raw_contextualized_embeddings = text_embeddings if self._attention_layer is not",
"shape (batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations",
"be used to calculate the regularization penalty during training. \"\"\"",
"Shape: (batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores",
"spans we consider after the pruning stage is >= the",
"event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based on mention scores. num_spans_to_keep_according_doc_len",
"= top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float() return",
"self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision, \"c_r\":",
"not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor =",
"to decide each span's antecedent. Each span can only #",
"overrides from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from",
"feature_size: int, context_layer: Seq2SeqEncoder = None, max_span_width: int = 1,",
"and we only consider up to max_antecedents # prior spans.",
"upper triangular part will be negative # because the offsets",
"``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing the",
"to the antecedent spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)",
"1: self._positive_label_size + 1]).item()) # print(pairwise_labels) # # # Shape:",
"mask these, # because these are exactly the indices which",
"= list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list += [attended_span_embeddings] if",
"considering for each top span. Has shape (batch_size, num_spans_to_keep, max_antecedents,",
"Each span i predicts a # single antecedent j, but",
"max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The offsets between each",
"ignore text: Dict[str, torch.LongTensor], spans: torch.IntTensor, coref_labels: torch.IntTensor = None,",
": ``torch.IntTensor`` The distance between the span and each of",
"valid objective for # clustering as we don't mind which",
"Our loss is the sum of the # probability assigned",
"Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape:",
"num_spans_to_keep_according_doc_len, ) event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape:",
"max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log of the mask for",
"= TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric = Average() self._realis_loss_metric =",
"mind which antecedent is predicted, so long as they are",
"This implies a clustering if we group # mentions which",
"# in order to not mess up the normalisation of",
"import logging import math from typing import Any, Dict, List,",
"self._type_loss_metric = Average() self._realis_loss_metric = Average() self._coref_loss_metric = Average() self._coref_label_metric",
"independent # of the batch dimension - it's just a",
"prior spans. So the first thing we do is construct",
"pairwise_labels = pairwise_labels * 0 else: # for pairwise_labels without",
"= (target_labels >= 0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator if",
"torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``, required. Embedding representations of",
"from. Similarly, each element can only predict previous spans, so",
"``(1, num_spans_to_keep, max_antecedents)``. \"\"\" # Shape: (num_spans_to_keep, 1) target_indices =",
"num_spans_to_keep, max_antecedents)``. \"\"\" # Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep,",
"self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) *",
"Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) \"\"\" # Shape: (batch_size,",
"of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal",
"indices, but here # we need to select spans for",
"the top k spans. Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets :",
"a function of the span's position in # top_spans. The",
"Shape: (1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values,",
"return -1 when they are used as padding. As we",
"1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels",
"import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import",
"self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout",
"pruning strategy used in the forward pass. Parameters ---------- pairwise_embeddings:",
"The log of the mask for valid antecedents. Returns -------",
"from a ``ListField[SpanField]`` of indices into the text of the",
"else: new_contextualized_embeddings = raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings,",
"pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of pairs of spans. Has",
"bce_loss_weight: float = 1.0, bce_pos_weight: float = None, local_window_size: int",
"Dict, List, Optional, Tuple import torch import torch.nn.functional as F",
"will be used to calculate the regularization penalty during training.",
"event_embeddings = self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings",
"Shape: (batch_size, num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores +=",
"the pruning stage, # a predicted antecedent. This implies a",
"= raw_contextualized_embeddings span_embeddings_list = list() attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans) span_embeddings_list",
"= self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings",
"= event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return event_embeddings def _get_type_antecedent_labels(self, top_event_type_labels):",
"mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder = None,",
"= target_indices - valid_antecedent_offsets # In our matrix of indices,",
"None). A tensor of shape (batch_size, num_spans), representing the event",
"Computes an embedding representation of pairs of spans for the",
"= text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) else: self._endpoint_span_extractor",
"possible `indices` of these spans. So, regardless of the batch,",
"* top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate",
"This method generates possible antecedents per span which survived the",
"We want to mask these, # because these are exactly",
"Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss =",
"list of spans and predicted antecedent indices into clusters of",
"the absent links on bad spans, enabling the pruning strategy",
"to be -inf # in order to not mess up",
"scores for every span. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores:",
"shape (batch_size, num_spans), representing the event label of the specific",
"# span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings],",
"self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width self._spans_per_word = spans_per_word self._max_antecedents",
"mentions which refer to each other in a chain. #",
"antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size,",
"greater than -1. predicted_antecedents -= 1 output_dict = {\"top_spans\": top_spans,",
"the spans which we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices)",
"of pairs of spans for the pairwise scoring function to",
"each element in the batch. # This reformats the indices",
"------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation of the pair of",
"have our variables in terms of num_spans_to_keep, we need to",
"labels are augmented with a dummy antecedent at the zeroth",
"kept while pruning. max_antecedents : ``int``, required. The maximum number",
"1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets",
"str = 'dot', decoding: str = 'type-guided', type_threshold: float =",
"BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0:",
"= util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions for which spans",
"Computes scores for every pair of spans. Additionally, a dummy",
"the gold clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).",
"A tensor of shape (batch_size, num_spans), representing the realis label",
"we need to select spans for each element in the",
"marginal log-likelihood. # This is equal to the log of",
"\"c_f1\": coref_f1, \"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset),",
"= self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2,",
"# Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim +",
"num_spans_to_keep, 1) # dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True) #",
"predicted_antecedents : ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep)`` representing,",
"< 0: device = 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device)",
"= coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width =",
"antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask bce_loss",
"text : ``Dict[str, torch.LongTensor]``, required. The output of a ``TextField``",
"clustering if we group # mentions which refer to each",
"num_spans_to_keep, event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels],",
"computing the possible `indices` of these spans. So, regardless of",
": ``RegularizerApplicator``, optional (default=``None``) If provided, will be used to",
"(batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels)",
"for mentions. Comes from a ``ListField[SpanField]`` of indices into the",
"max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for every antecedent. Has",
"this returns a matrix of shape (num_spans_to_keep, max_antecedents), where the",
") else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim",
"are in the # same gold cluster as the span",
"Shape: (batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1)",
"realis label of the specific span. metadata : ``List[Dict[str, Any]]``,",
"Embedding representations of pairs of spans. Has shape (batch_size, num_spans_to_keep,",
"max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores",
"same # coreference cluster that would be valid antecedents. Our",
"if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1",
"we kept. pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1), top_indices, flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels,",
"required. The output of a ``TextField`` representing the text of",
"of spans. Additionally, a dummy label is included, representing the",
"regularization penalty during training. \"\"\" def __init__(self, vocab: Vocabulary, text_field_embedder:",
"= torch.nn.Linear(self._event_embedding.get_output_dim() * 2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1",
"not None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)",
"embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values =",
"# In our matrix of indices, the upper triangular part",
"Seq2SeqEncoder = None, max_span_width: int = 1, spans_per_word: float =",
"accepts 1D indices, but here # we need to select",
"not None and event_type_labels is not None: pruned_event_type_labels = torch.gather(event_type_labels,",
"top_span_size, positive_label_size) \"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0),",
"predicts a # single antecedent j, but there might be",
"the embedded features, such as distances or span widths. max_span_width:",
"words in the document. max_antecedents: int, required. For each mention",
"start and end word indices of the top spans that",
"for valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor of",
"output of a ``TextField`` representing the text of the document.",
"# Compute indices for antecedent spans to consider. max_antecedents =",
"cluster as the span we are currently considering. Each span",
"features, which is then scored by a linear layer. feature_size:",
"FeedForward, feature_size: int, context_layer: Seq2SeqEncoder = None, max_span_width: int =",
"DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention Type: %s' %",
"score for each (span, antecedent) pair we considered. \"\"\" antecedent_log_mask",
"in the document. mention_feedforward : ``FeedForward`` This feedforward network is",
"span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size *",
"0 else: # for pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1,",
"link. loss : ``torch.FloatTensor``, optional A scalar loss to be",
"here # we need to select spans for each element",
"Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)",
"mentions. Comes from a ``ListField[SpanField]`` of indices into the text",
"shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) \"\"\" # Shape: (batch_size, num_spans_to_keep,",
"self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward),",
"+ event_type_size, embedding_size) \"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),))",
"of the specific span. metadata : ``List[Dict[str, Any]]``, optional (default",
"= None). A metadata dictionary for each instance in the",
"'dot', decoding: str = 'type-guided', type_threshold: float = -1., type_refine:",
"optional (default = None). A metadata dictionary for each instance",
"* target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor,",
"Has shape (batch_size, num_spans_to_keep, max_antecedents + 1). \"\"\" # Shape:",
"are allowed antecedents. # Once we have this matrix, we",
"other in a chain. # Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents",
"which spans to consider as mentions. # Shape: (batch_size, num_spans_to_keep,",
"respect to the top k spans. Has shape ``(num_spans_to_keep, max_antecedents)``.",
"The result of calling :func:`forward` on an instance or batch",
"bce_pos_weight self._max_span_width = max_span_width self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents",
"top_mask[:, :, self._positive_label_size + 2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores,",
"considered. \"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) #",
"# Shape: (batch_size * num_spans_to_keep) # torch.index_select only accepts 1D",
"None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ \"\"\" Parameters ----------",
"\"c_r\": coref_recall, \"c_f1\": coref_f1, \"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'],",
"of a list of (start, end) inclusive spans into the",
"scores for every antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask:",
"of shape (batch_size, num_spans, 2), representing the inclusive start and",
"Lee et al., 2017. The basic outline of this model",
"``int`` The maximum width of candidate spans. spans_per_word: float, required.",
"# Shape: (batch_size, num_spans_to_keep, 1) # dummy_labels = (1 -",
"distance between the spans). Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask :",
"def get_metrics(self, reset: bool = False) -> Dict[str, float]: mention_result",
"FeedForward, antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder = None, max_span_width:",
": ``torch.FloatTensor``, optional A scalar loss to be optimised. \"\"\"",
"consider this many antecedents. lexical_dropout: ``int`` The probability of dropping",
"coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {\"c_p\": coref_precision, \"c_r\": coref_recall, \"c_f1\":",
"is always zero. For the true antecedent spans, the score",
"torch.IntTensor): \"\"\" Generates a binary indicator for every pair of",
"top_span_mention_scores = top_span_mention_scores.detach() # Now that we have our variables",
"The maximum width of candidate spans. spans_per_word: float, required. A",
"Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor``",
"dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)",
"each other in a chain. # Shape: (batch_size, num_spans_to_keep) _,",
"= 1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type) self._attention_layer",
"# This reformats the indices to take into account their",
"1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep:",
"normalisation of the distribution. # Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask",
"embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape = (antecedent_embeddings.size(0), antecedent_embeddings.size(1), antecedent_embeddings.size(2), antecedent_distance_embeddings.size(-1))",
"of num_spans_to_keep, we need to # compare span pairs to",
"The factoring allows the model to blame many of the",
"need to select spans for each element in the batch.",
"- j if j <= i, or zero otherwise. Parameters",
"both the original span representations, the element-wise similarity of the",
"``torch.FloatTensor``, required. Embedding representations of the event types. Has shape",
"consider up to max_antecedents # prior spans. So the first",
"Shape: (batch_size, num_spans) span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()",
"\"c_l\": self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] + coref_f1) / 2.} @staticmethod def",
"= pretrain_coref self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size",
"self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) )",
"= max_span_width self._spans_per_word = spans_per_word self._max_antecedents = max_antecedents self._mention_f1_score =",
"event_embeddings: ``torch.FloatTensor``, required. Embedding representations of the event types. Has",
"embed the ``text`` ``TextField`` we get as input to the",
"num_spans_to_keep, 1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape)",
"In our matrix of indices, the upper triangular part will",
"vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used to embed the",
"+ bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list = decoded_result['pred_label_spans'] gold_label_spans_list =",
"(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) #",
"self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent scores. # Shape: (batch_size,",
"coref_precision, \"c_r\": coref_recall, \"c_f1\": coref_f1, \"m_p\": mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\":",
"def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\" Computes",
"None # NIL for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size)",
"between valid span pairs. # Shapes: # (num_spans_to_keep, max_antecedents), #",
"regularizer : ``RegularizerApplicator``, optional (default=``None``) If provided, will be used",
"new_attention_mask = text_mask[:, :, None] * attention_mask new_attention_mask = torch.triu(torch.tril(new_attention_mask,",
"int = 50, lexical_dropout: float = 0.2, pretrain_ed: bool =",
"to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach()",
"using the negative marginal log-likelihood. # This is equal to",
"SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <= 0:",
"- it's just a function of the span's position in",
"consider. Has shape (batch_size, num_spans_to_keep, max_antecedents, embedding_size) \"\"\" # Shape:",
"from allennlp.modules.token_embedders import Embedding from allennlp.nn import util, InitializerApplicator, RegularizerApplicator",
"valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1)",
"0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item()) # print(pairwise_labels) #",
"coreference_scores.max(2) # Subtract one here because index 0 is the",
"of spans that were kept while pruning. max_antecedents : ``int``,",
"the original text and the annotated gold coreference clusters for",
"torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape: (batch_size, num_spans_to_keep, max_antecedents)",
"Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required. Mention",
"event_type_size + max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The offsets",
"with a dummy antecedent at the zeroth position, which represents",
"= self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings #",
"valid_antecedent_offsets : ``torch.IntTensor`` The distance between the span and each",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) #",
"- pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size",
"to predict, per span. # We're generating a logspace mask",
"into the text of the document. coref_labels : ``torch.IntTensor``, optional",
"mention spans we retain with respect to the number of",
"in terms of the number of considered spans (i.e not",
"representations of pairs of spans. Has shape (batch_size, num_spans_to_keep, max_antecedents,",
"= torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents:",
"the index (with respect to antecedent_indices) of the most likely",
"pairs of span representation, along with any pairwise features, which",
"here because we will eventually create a # distribution over",
"k in the same # coreference cluster that would be",
"in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item())",
"0.1, max_antecedents: int = 50, lexical_dropout: float = 0.2, pretrain_ed:",
"many of the absent links on bad spans, enabling the",
"to select from. Similarly, each element can only predict previous",
"event_embeddings) + event_prob[:, :, :1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep,",
"span, but here we are computing the possible `indices` of",
"the list of clusters, which are in turn comprised of",
"The id is arbitrary, as we just care about the",
"top_span_mention_scores.detach() # Now that we have our variables in terms",
"batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result",
"each top span. Has shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents,",
"inclusive start and end indices of candidate spans for mentions.",
"= self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine:",
"the document. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder``",
"= Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss",
"regularizer: Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab)",
"int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\" This method generates possible",
"comparisons based on span widths when we attend over the",
"``torch.FloatTensor``, required. Embedding representations of the antecedent spans we are",
"= None, metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:",
"decoding: str = 'type-guided', type_threshold: float = -1., type_refine: bool",
"true antecedent spans, the score consists of the pairwise antecedent",
"loss using the negative marginal log-likelihood. # This is equal",
"each span, or -1 for those which do not appear",
"Embedding from allennlp.nn import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import",
"1D indices, but here # we need to select spans",
"spans, so this returns a matrix of shape (num_spans_to_keep, max_antecedents),",
"shape ``(batch_size, num_spans_to_keep, 2)`` representing the start and end word",
"# the same coreference cluster. if self._pretrain_ed: # All antecedent",
"= self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def forward(self, # type:",
"valid_antecedent_offsets # In our matrix of indices, the upper triangular",
"Mention scores for every span. Has shape (batch_size, num_spans_to_keep, max_antecedents).",
"= text_field_embedder self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward) self._event_scorer =",
"indices. We want to mask these, # because these are",
"have prior spans as antecedents, and we only consider up",
"@overrides def forward(self, # type: ignore text: Dict[str, torch.LongTensor], spans:",
"event label of the specific span. realis_labels : ``torch.IntTensor``, optional",
"coref_loss_weight self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width",
"given span, the negative marginal log likelihood of all antecedents",
"indices of every antecedent to consider with respect to the",
"predicted_antecedents = coreference_scores.max(2) # Subtract one here because index 0",
"max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size,",
"event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents",
"\"\"\" Computes scores for every pair of spans. Additionally, a",
"can only # have prior spans as antecedents, and we",
"to the span representations which is then scored by a",
"mention_result['precision'], \"m_r\": mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\":",
"Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings =",
"Shape: (batch_size * num_spans_to_keep) # torch.index_select only accepts 1D indices,",
"_cannot_ have any antecedents, because there are none to select",
"= torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings) # Shape: (batch_size,",
"in terms of num_spans_to_keep, we need to # compare span",
"# Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if",
"pretrain_ed: bool = False, pretrain_coref: bool = False, coref_loss_weight: float",
"* 2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate = None #",
"Used to initialize the model parameters. regularizer : ``RegularizerApplicator``, optional",
"candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores",
"span representations are scored and used to prune away spans",
"in the # same gold cluster as the span we",
"that the span is not coreferent with anything. For the",
"to take into account their # index into the batch.",
"1) shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) #",
"example, the first span in the document should have no",
") event_embeddings = self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size",
"endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list,",
"new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides def forward(self, #",
"int, max_antecedents: int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\"",
"max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values =",
"embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents +",
"= None else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True)",
"= False) -> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall,",
"are considering. Has shape (1, max_antecedents). Returns ------- span_pair_embeddings :",
"we just care about the clustering. Has shape (batch_size, num_spans_to_keep,",
"max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not None: top_embeddings = self._type_refine_embedding(top_embeddings,",
"on bad spans, enabling the pruning strategy used in the",
"(1, max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask =",
"_combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``, required. Embedding representations",
"Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017. The",
"num_head = 1 else: raise NotImplementedError('Attention Type: %s' % self._attention_type)",
"self._num_distance_buckets = 10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight",
"stage is >= the # total number of spans, because",
"from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors",
"list of clusters, which are in turn comprised of a",
"numbers of valid antecedents. For example, the first span in",
"indices for antecedent spans to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)",
"not None: output_dict[\"document\"] = [x[\"original_text\"] for x in metadata] output_dict[\"offset\"]",
"every pair of spans. Additionally, a dummy label is included,",
"required. The maximum number of antecedent spans to consider for",
"self._bce_loss_weight = bce_loss_weight self._bce_pos_weight = bce_pos_weight self._max_span_width = max_span_width self._spans_per_word",
"is the \"no antecedent\" class, # so this makes the",
"to antecedent_indices) of the most likely antecedent. -1 means there",
"num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator",
"is valid. Required since different spans have different numbers of",
"reformats the indices to take into account their # index",
"no predicted link. loss : ``torch.FloatTensor``, optional A scalar loss",
"# Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans) #",
"negative # because the offsets will be > the target",
"# Created by Roger on 2019-09-10 # Mostly by AllenNLP",
"self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have, for each",
"dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep,",
"event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings],",
"For example, the first span in the document should have",
"every pair of spans. This label is one if and",
"(batch, top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2))",
"* self._bce_loss_weight # Now, compute the loss using the negative",
"text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the",
": ``torch.IntTensor`` A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing",
"2)) shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob",
"self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer = None",
"Embedding representations of the antecedent spans we are considering for",
"= torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)), torch.nn.Sigmoid() ) else: self._type_refine_gate",
"overrides import overrides from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import",
"Average() self._type_label_metric = Average() self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss",
"output_dict[\"loss\"] = coref_loss + bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list =",
"of these spans. So, regardless of the batch, the 1st",
"top_indices) type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels) # Find the gold labels for",
"key: clusters : ``List[List[List[Tuple[int, int]]]]`` A nested list, representing, for",
"(1 - refine_gate) * event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings,",
"span, or -1 for those which do not appear in",
"+ max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size,",
"0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size + 2:] =",
"+ max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We",
"(batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings,",
"candidate mention spans we retain with respect to the number",
"= [event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores,",
"representing the inclusive start and end indices of candidate spans",
"type_antecedent_labels = (top_event_type_labels == event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings,",
"based on span widths when we attend over the #",
"equal to (i - 1) - j if j <=",
"num_spans_to_keep, 1 + event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores,",
"top_event_type_labels: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices",
"is greater than -1. predicted_antecedents -= 1 output_dict = {\"top_spans\":",
"span and its antecedent. The factoring allows the model to",
"shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log of",
"def _generate_valid_antecedents(num_spans_to_keep: int, max_antecedents: int, device: int) -> Tuple[torch.IntTensor, torch.IntTensor,",
"tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span,",
"span_mask): \"\"\" :param span_mask: (batch, top_span_size, 1) :return: (batch, top_span_size,",
"candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have, for each span which",
"training. \"\"\" def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward,",
"batch of instances. Returns ------- The same output dictionary, but",
"representing the event label of the specific span. realis_labels :",
") # Shape: (1, 1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings",
"as they are in # the same coreference cluster. if",
"num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss",
"shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations",
"coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) # We now have,",
"antecedent_labels : ``torch.IntTensor``, required. The cluster id label for every",
"previous span, but here we are computing the possible `indices`",
"shape (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``,",
"the prediction # is greater than -1. predicted_antecedents -= 1",
"elements of the mask to be -inf # in order",
"shape (batch_size, num_spans_to_keep, max_antecedents + 1). \"\"\" # Shape: (batch_size,",
"text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask = (spans[:,",
"which other spans are allowed antecedents. # Once we have",
"self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is not None: output_dict[\"document\"]",
"torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\" Generates a binary indicator for every",
"span and its antecedent spans in terms of spans we",
"will be negative # because the offsets will be >",
"max_antecedents # prior spans. So the first thing we do",
"self._attention_type = attention_type self._decoding = decoding self._type_threshold = type_threshold logger.info(vocab.get_token_from_index(0,",
"top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log() negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss",
"else: raw_contextualized_embeddings = text_embeddings if self._attention_layer is not None: new_contextualized_embeddings",
"we can use to make coreference decisions between valid span",
"shape (batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The cluster id",
"cluster id label for every span. The id is arbitrary,",
"spans to consider for every span. device: ``int``, required. The",
"self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is not None: output_dict[\"document\"] =",
"those which do not appear in any clusters. event_type_labels :",
"initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used to initialize the model",
"1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)])",
"Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder",
"with respect to the number of words in the document.",
"if self._pretrain_ed: # All antecedent mask is 0 top_mask =",
"---------- output_dict : ``Dict[str, torch.Tensor]``, required. The result of calling",
"# Shape: (1, 1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings =",
"for every pair of spans. This label is one if",
"end indices of candidate spans for mentions. Comes from a",
"a dummy antecedent at the zeroth position, which represents the",
"(1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape:",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores = torch.cat([dummy_scores,",
"antecedents per span which survived the pruning stage. This procedure",
"self._pretrain_ed: # All antecedent mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone()",
"used in the forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required.",
"self._event_scorer = torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed",
"is not None: top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings) # Select tensors",
"spans, because in this case, it is possible we might",
"a span's # index to the indices of its allowed",
"_compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\" Generates a",
"flat_top_span_indices) # Compute indices for antecedent spans to consider. max_antecedents",
"required. The log of the mask for valid antecedents. Returns",
"(span, antecedent) pair we considered. \"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1),",
"tensor representing whether a given pair of spans belong to",
"because the offsets will be > the target indices. We",
"of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span the",
"spans as antecedents, and we only consider up to max_antecedents",
"# single antecedent j, but there might be several prior",
"predicted link. loss : ``torch.FloatTensor``, optional A scalar loss to",
"self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings = refine_gate * top_embeddings + (1",
"embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the antecedent",
"indices into clusters of spans for each element in the",
"coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"] = coref_loss + bce_loss",
"self._coref_loss_weight output_dict[\"loss\"] = coref_loss + bce_loss decoded_result = self.decode(output_dict) pred_label_spans_list",
"num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim()",
"torch.IntTensor = None, realis_labels: torch.IntTensor = None, metadata: List[Dict[str, Any]]",
"it is possible we might # consider a masked span.",
"as the span we are currently considering. Each span i",
"representations that we generate from these indices, we # need",
"Compute final predictions for which spans to consider as mentions.",
"antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings",
"widths when we attend over the # span representations that",
"of candidate mention spans we retain with respect to the",
"candidate_antecedent_embeddings) # Compute antecedent scores. # Shape: (batch_size, num_spans_to_keep, event_type_size",
"EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim()",
"= [x.get(\"doc_id\", None) for x in metadata] return output_dict @overrides",
":, 1: self._positive_label_size + 1]).item()) # print(pairwise_labels) # # #",
"= self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels > 0).float()) * span_mask bce_loss = bce_loss.sum()",
"mention scores for the span and its antecedent. The factoring",
"the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required. The",
"stage. antecedent_indices : ``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep, max_antecedents)``",
"self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings)",
"now have, for each span which survived the pruning stage,",
"max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator =",
"each instance in the batch. We use the \"original_text\" and",
"spans: torch.IntTensor, coref_labels: torch.IntTensor = None, event_type_labels: torch.IntTensor = None,",
"int, context_layer: Seq2SeqEncoder = None, max_span_width: int = 1, spans_per_word:",
"spans. Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``, required.",
"mask for valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor",
"------- valid_antecedent_indices : ``torch.IntTensor`` The indices of every antecedent to",
"which is then scored by a linear layer. antecedent_feedforward: ``FeedForward``",
"antecedent_distance_embeddings.size(-1)) # Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings",
"Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor of shape (batch_size, num_spans_to_keep,",
"``torch.FloatTensor``, required. Mention scores for every antecedent. Has shape (batch_size,",
"antecedent_embeddings: torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``, required. Embedding representations of the",
"raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask) if self._attention_layer is not None: new_contextualized_embeddings",
"Parameters ---------- top_span_labels : ``torch.IntTensor``, required. The cluster id label",
"(default = None). A metadata dictionary for each instance in",
"don't want to predict, per span. # We're generating a",
"self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item()) # print(pairwise_labels) # #",
"\\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not None: top_embeddings",
"Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,",
"None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer =",
"self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential(",
"would be valid antecedents. Our loss is the sum of",
"mention_result = self._mention_f1_score.get_metric(reset) coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset) return {\"c_p\":",
"forward pass. Parameters ---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of",
"span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return",
"event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0),",
"antecedent spans to consider for every span. device: ``int``, required.",
"if max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,",
"antecedent indices into clusters of spans for each element in",
"self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores += antecedent_log_mask",
"max_antecedents)``. \"\"\" # Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)",
"top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels >=",
"RegularizerApplicator from allennlp.training.metrics import Average from overrides import overrides from",
"predictions for which spans to consider as mentions. # Shape:",
"the \"no antecedent\" class, # so this makes the indices",
"# some comparisons based on span widths when we attend",
"of shape (batch_size, num_spans), representing the cluster ids of each",
"= refine_gate * top_embeddings + (1 - refine_gate) * event_rep",
"lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda",
"spans if the prediction # is greater than -1. predicted_antecedents",
"of the specific span. realis_labels : ``torch.IntTensor``, optional (default =",
"most likely antecedent. -1 means there was no predicted link.",
"None, event_type_labels: torch.IntTensor = None, realis_labels: torch.IntTensor = None, metadata:",
"antecedents. Note that this is independent # of the batch",
"embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) # Shape:",
": ``torch.FloatTensor``, required. Embedding representations of the antecedent spans we",
"self._type_refine_gate = None # NIL for Unified Event self._event_embedding =",
"(pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels * (1 -",
"unary mention scores for the span and its antecedent. The",
"here because index 0 is the \"no antecedent\" class, #",
"event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1)",
"of the # probability assigned to all valid antecedents. This",
"the event types. Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings :",
"0).float() type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:,",
":func:`forward` on an instance or batch of instances. Returns -------",
"= text_field_embedder.get_output_dim() if max_span_width > 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor",
"top_indices, flat_top_span_indices) # Compute indices for antecedent spans to consider.",
"dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob], -1) event_prob =",
"into clusters of spans for each element in the batch.",
"Any, Dict, List, Optional, Tuple import torch import torch.nn.functional as",
"for each word in the document. mention_feedforward : ``FeedForward`` This",
"event_type_labels is not None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices) type_antecedent_labels",
"NIL for Unified Event self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'), embedding_dim=span_embedding_size) self._event_embedding_map =",
"of spans belong to the same cluster in the gold",
"num_spans, 2), representing the inclusive start and end indices of",
"Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans)",
"spans that are unlikely to occur in a coreference cluster.",
"= event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def",
"us variables with shapes # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size),",
"as we just care about the clustering. Has shape (batch_size,",
"-> Tuple[torch.IntTensor, torch.IntTensor, torch.FloatTensor]: \"\"\" This method generates possible antecedents",
"= text_embeddings.size(1) num_spans = spans.size(1) # Shape: (batch_size, document_length) text_mask",
"For the remaining spans, the model decides which antecedent span",
"of the spans in the document. Parameters ---------- vocab :",
"there are none to select from. Similarly, each element can",
"the decision that the span is not coreferent with anything.",
"shape (batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A",
"self._attentive_span_extractor.get_output_dim() if type_refine: self._type_refine_gate = torch.nn.Sequential( TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)),",
"from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores import EventCorefScores from src.metrics.mention_f1",
"the original span representations, the element-wise similarity of the span",
"self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout = lambda x: x initializer(self)",
"efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions for",
"event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float() return type_antecedent_labels def _type_refine_embedding(self,",
"1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size)",
"event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) #",
"= (pairwise_labels.sum(-1, keepdim=True) > 0).float() type_antecedent_labels = type_antecedent_labels * (1",
"the span and each of its antecedents in terms of",
"not mess up the normalisation of the distribution. # Shape:",
"logspace mask here because we will eventually create a #",
"the span is not coreferent with anything. For the dummy",
"any previous span, but here we are computing the possible",
"# a predicted antecedent. This implies a clustering if we",
"= 1, spans_per_word: float = 0.1, max_antecedents: int = 50,",
"We're generating a logspace mask here because we will eventually",
"The CUDA device to use. Returns ------- valid_antecedent_indices : ``torch.IntTensor``",
"edge cases where # the number of spans we consider",
"antecedent spans, the score consists of the pairwise antecedent score",
"num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The cluster id label for",
"the gold labels for the spans which we kept. pruned_gold_labels",
"to consider for every span. device: ``int``, required. The CUDA",
"None and event_type_labels is not None: pruned_event_type_labels = torch.gather(event_type_labels, 1,",
"for each (span, antecedent) pair we considered. \"\"\" antecedent_log_mask =",
"scored by a linear layer. antecedent_feedforward: ``FeedForward`` This feedforward network",
"import Embedding from allennlp.nn import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics",
"mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length)) (top_embeddings, top_mask, top_indices,",
"num_spans_to_keep, event_type_size + max_antecedents, embedding_size). antecedent_offsets : ``torch.IntTensor``, required. The",
"Has shape (batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding",
"of the span's position in # top_spans. The spans are",
"the batch`. The reason this is the case is that",
"int]]]]`` A nested list, representing, for each instance in the",
"event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) #",
"belong to the same cluster. The labels are augmented with",
"our variables again to get embeddings # for all valid",
"the score consists of the pairwise antecedent score and the",
"embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings =",
"contextual information for each word in the document. mention_feedforward :",
"from allennlp.models.model import Model from allennlp.modules import FeedForward, Pruner from",
"of indices into the text of the document. coref_labels :",
"Shape: (batch_size, num_spans, num_event_realis_label) # Shape: (batch_size, num_spans, num_event_realis_label) #",
"between zero and one which controls what percentage of candidate",
"= torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0), event_prob.size(1), 1]",
"Dict[str, torch.Tensor]: # pylint: disable=arguments-differ \"\"\" Parameters ---------- text :",
"# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings",
"= self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores + antecedent_mention_scores antecedent_scores +=",
"self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape:",
"else: new_contextualized_embeddings = raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2 *",
"# Shape: (num_spans_to_keep, 1) target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape:",
"+ feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings =",
"in metadata] output_dict['doc_id'] = [x.get(\"doc_id\", None) for x in metadata]",
"Shape: (batch_size, num_spans, 2) spans = F.relu(spans.float()).long() if self._context_layer: #",
"of spans for the pairwise scoring function to consider. This",
"to know which other spans are allowed antecedents. # Once",
"we # need them to be <= 0. This is",
"span is valid. Required since different spans have different numbers",
"of instances. Returns ------- The same output dictionary, but with",
"reset: bool = False) -> Dict[str, float]: mention_result = self._mention_f1_score.get_metric(reset)",
"are exactly the indices which we don't want to predict,",
"would be consistent with the data, in the sense that",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents) # print(top_span_labels) # print(antecedent_labels) target_labels",
"of dropping out dimensions of the embedded text. initializer :",
"-1)) top_embeddings = refine_gate * top_embeddings + (1 - refine_gate)",
"+ 1 event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0),",
"word in the document. mention_feedforward : ``FeedForward`` This feedforward network",
"we don't mind which antecedent is predicted, so long as",
"util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device)",
"import EventCorefScores from src.metrics.mention_f1 import TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode",
"x initializer(self) def _get_event_embedding(self, span_mask): \"\"\" :param span_mask: (batch, top_span_size,",
"def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels: torch.IntTensor): \"\"\" Generates",
"in a chain. # Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents =",
"---------- top_span_labels : ``torch.IntTensor``, required. The cluster id label for",
"= spans.size(1) # Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float() #",
"= \\ _generate_valid_antecedents(num_spans_to_keep_according_doc_len, max_antecedents, util.get_device_of(text_mask)) if self._type_refine_gate is not None:",
"this model is to get an embedded representation of each",
"self._attentive_span_extractor(text_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size + 2 *",
"the regularization penalty during training. \"\"\" def __init__(self, vocab: Vocabulary,",
"self._event_embedding(event_indices) event_embeddings = event_embeddings.reshape(event_embeddings.size(0), event_embeddings.size(1) * event_embeddings.size(2)) event_embeddings = self._event_embedding_map.forward(event_embeddings)",
"event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch, top_span_size,",
"tensor of shape (batch_size, num_spans, 2), representing the inclusive start",
"torch.index_select only accepts 1D indices, but here # we need",
"tensor of shape (batch_size, num_spans), representing the realis label of",
"we only consider up to max_antecedents # prior spans. So",
"top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels = (top_event_type_labels == event_indices).float()",
"combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width >",
"a linear layer. antecedent_feedforward: ``FeedForward`` This feedforward network is applied",
"are coreferent with. The resulting coreference links, after applying transitivity,",
"all antecedent predictions # that would be consistent with the",
"spans to consider. max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings =",
"= Average() self._nil_label_metric = Average() if self._bce_pos_weight: self._bce_loss = BCEWithLogitsLoss(reduction='none',",
"This gives us variables with shapes # like (batch_size, num_spans_to_keep,",
"num_spans_to_keep, max_antecedents, embedding_size). return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)",
"``torch.FloatTensor``, required. Mention scores for every span. Has shape (batch_size,",
"probabilities of all antecedent predictions # that would be consistent",
"minimising, for a # given span, the negative marginal log",
"max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings) # Compute antecedent",
"antecedents the model considered. predicted_antecedents : ``torch.IntTensor`` A tensor of",
"label is one if and only if the pair of",
"size for all the embedded features, such as distances or",
"1), representing the unormalised score for each (span, antecedent) pair",
"original span representations, the element-wise similarity of the span representations,",
"%s' % self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head )",
"max_antecedents + 1). \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents) #",
"> 1: endpoint_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width,",
"= max_antecedents self._mention_f1_score = TopSpanMentionTypeF1() self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval) self._type_loss_metric =",
"1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels, type_antecedent_labels, antecedent_labels) bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1), (event_type_labels",
"is then scored by a linear layer. feature_size: ``int`` The",
"# index to the indices of its allowed antecedents. Note",
"coding:utf-8 -*- # Created by Roger on 2019-09-10 # Mostly",
"representation of pairs of spans for the pairwise scoring function",
"representation, along with any pairwise features, which is then scored",
"of the probabilities of all antecedent predictions # that would",
"pair of spans belong to the same cluster in the",
"for every antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``,",
"unormalised score for each (span, antecedent) pair we considered. \"\"\"",
"Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required. The log",
"we are minimising, for a # given span, the negative",
"1)) self._local_window_size = local_window_size self._attention_type = attention_type self._decoding = decoding",
"Mostly by AllenNLP import logging import math from typing import",
"InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average from overrides import overrides",
"``int`` The probability of dropping out dimensions of the embedded",
"self._event_scorer(event_embeddings) # (batch_size, num_spans_to_keep, event_type_size) event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0),",
"top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) #",
"2, self._event_embedding.get_output_dim()) self._positive_label_size = vocab.get_vocab_size('labels') - 1 # 10 possible",
"representations are scored and used to prune away spans that",
"up to max_antecedents # prior spans. So the first thing",
"Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor representing whether",
"TopSpanMentionTypeF1 from src.utils.cluster_decoding_utils import node_decode logger = logging.getLogger(__name__) # pylint:",
"in the document should have no valid antecedents. Has shape",
"``torch.IntTensor``, required. The offsets between each top span and its",
"spans_per_word: float, required. A multiplier between zero and one which",
"select spans for each element in the batch. # This",
"max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask representing whether each",
"= SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer = None else:",
"# (batch, top_span_size, emb_size) bmm event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1,",
"gold clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents + 1). \"\"\"",
"10 self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight =",
"instance or batch of instances. Returns ------- The same output",
"---------- shape (batch_size, event_type_size, embedding_size). top_span_embeddings : ``torch.FloatTensor``, required. Embedding",
":1] * top_embeddings refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1)) top_embeddings =",
"num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores = top_span_mention_scores.detach() #",
"so long as they are in # the same coreference",
"span_mask: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size) \"\"\" event_indices",
"given pair of spans belong to the same cluster in",
"antecedent_mention_scores: torch.FloatTensor, antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor: \"\"\" Computes scores for",
"``torch.IntTensor`` A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each",
"possible antecedents the model considered. predicted_antecedents : ``torch.IntTensor`` A tensor",
"a chain. # Shape: (batch_size, num_spans_to_keep) _, predicted_antecedents = coreference_scores.max(2)",
"for all valid antecedents for each span. This gives us",
"antecedent score and the unary mention scores for the span",
"clustering. Has shape (batch_size, num_spans_to_keep, max_antecedents). Returns ------- pairwise_labels_with_dummy_label :",
"# for all valid antecedents for each span. This gives",
"instance. Returns ------- An output dictionary consisting of: top_spans :",
"(batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings,",
"raw_contextualized_embeddings # Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)",
"spans into the original document. \"\"\" return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding,",
"event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents +",
"anything. For the dummy label, the score is always zero.",
"= None) -> Dict[str, torch.Tensor]: # pylint: disable=arguments-differ \"\"\" Parameters",
"0).float() pairwise_labels = same_cluster_indicator * non_dummy_indicator if self._pretrain_ed: pairwise_labels =",
"of the embedded text. initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``) Used",
"= min(self._max_antecedents, num_spans_to_keep_according_doc_len) # top_span_embeddings = top_span_embeddings.detach() # top_span_mention_scores =",
"# Shapes: # (num_spans_to_keep, max_antecedents), # (1, max_antecedents), # (1,",
"label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape: (1, max_antecedents + event_type_size,",
"of the document. coref_labels : ``torch.IntTensor``, optional (default = None).",
"antecedents. lexical_dropout: ``int`` The probability of dropping out dimensions of",
"= (1 - pairwise_labels).prod(-1, keepdim=True) # Shape: (batch_size, num_spans_to_keep, event_type_size",
"self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size",
"which refer to each other in a chain. # Shape:",
"batch, the list of clusters, which are in turn comprised",
"one if and only if the pair of spans belong",
"max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance between the span and",
"-1., type_refine: bool = True, type_match_in_eval: bool = True, initializer:",
"with shapes # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which #",
"torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings @overrides",
"span_embeddings_list += [attended_span_embeddings] if self._endpoint_span_extractor is not None: # Shape:",
"-1) # Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)",
"= torch.cat([target_embeddings, antecedent_embeddings, antecedent_embeddings * target_embeddings, antecedent_distance_embeddings], -1) return span_pair_embeddings",
"label_bucket_values], 1) ) # Shape: (1, 1, max_antecedents + event_type_size,",
"int = 10, attention_type: str = 'dot', decoding: str =",
"Has shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance between",
"A tensor of shape (batch_size, num_spans, 2), representing the inclusive",
"== event_indices).float() return type_antecedent_labels def _type_refine_embedding(self, top_embeddings, event_embeddings): # (batch,",
"select from. Similarly, each element can only predict previous spans,",
"``torch.IntTensor`` The distance between the span and each of its",
"# Now, compute the loss using the negative marginal log-likelihood.",
"Once we have this matrix, we reformat our variables again",
"of spans belong to the same cluster. The labels are",
"super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer) logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer = context_layer",
"'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else: raise NotImplementedError('Attention",
"is equal to (i - 1) - j if j",
"shape ``(num_spans_to_keep, max_antecedents)``. valid_antecedent_offsets : ``torch.IntTensor`` The distance between the",
"antecedent_mention_scores: ``torch.FloatTensor``, required. Mention scores for every antecedent. Has shape",
"of the event types. Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings",
"spans. So, regardless of the batch, the 1st span _cannot_",
"which are in the # same gold cluster as the",
"(1, 1, max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0) expanded_distance_embeddings_shape",
"# need them to be <= 0. This is only",
"+ 1) coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1) return coreference_scores def",
"self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item()) #",
"self._mention_pruner = Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size",
"SpanFields return -1 when they are used as padding. As",
"for the span and its antecedent. The factoring allows the",
"this matrix, we reformat our variables again to get embeddings",
"the relative # index of the spans to know which",
"different numbers of valid antecedents. For example, the first span",
"is a broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices =",
"to make # the multiple calls to util.batched_index_select below more",
"gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata, pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.)",
"num_spans_to_keep, max_antecedents) antecedent_scores = self._antecedent_scorer( self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1) antecedent_scores += top_span_mention_scores +",
"node_decode logger = logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model):",
"network is applied to pairs of span representation, along with",
"can only predict previous spans, so this returns a matrix",
"width of candidate spans. spans_per_word: float, required. A multiplier between",
"event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1), event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2)",
"shape (batch_size, num_spans_to_keep, max_antecedents + 1), representing the unormalised score",
"Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1) pairwise_labels_with_dummy_label =",
"The maximum number of antecedent spans to consider for every",
"representing the text of the document. spans : ``torch.IntTensor``, required.",
"antecedent spans we are considering for each top span. Has",
"# (1, max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask",
"model decides which antecedent span (if any) they are coreferent",
"torch.FloatTensor): \"\"\" Computes an embedding representation of pairs of spans",
"implements the coreference resolution model described \"End-to-end Neural Coreference Resolution\"",
"BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout",
"torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\" Computes an embedding representation",
"outline of this model is to get an embedded representation",
"valid antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``. \"\"\" # Shape:",
"span we are currently considering. Each span i predicts a",
"the antecedent spans we are considering for each top span.",
"antecedent predictions # that would be consistent with the data,",
"function of the span's position in # top_spans. The spans",
"= InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab,",
"torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0), event_prob.size(1), 1] dummy_scores",
"mask here because we will eventually create a # distribution",
"coreference resolution model described \"End-to-end Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by",
"variables in terms of num_spans_to_keep, we need to # compare",
"max_antecedents), # (1, num_spans_to_keep, max_antecedents) valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\",
"print(top_span_labels) # print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels ==",
"shape (1, max_antecedents). Returns ------- span_pair_embeddings : ``torch.FloatTensor`` Embedding representation",
"= torch.nn.Sequential( TimeDistributed(mention_feedforward), TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1)) ) self._pretrain_ed = pretrain_ed self._pretrain_coref",
"# Now that we have our variables in terms of",
"- torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :, None] *",
"[event_prob.size(0), event_prob.size(1), 1] dummy_scores = event_prob.new_zeros(*shape) event_prob = torch.cat([dummy_scores, event_prob],",
"max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep,",
"which # we can use to make coreference decisions between",
"with an additional ``clusters`` key: clusters : ``List[List[List[Tuple[int, int]]]]`` A",
"= bce_loss.sum() * self._bce_loss_weight # Now, compute the loss using",
"the pair of spans belong to the same cluster. The",
"top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) *",
"# index into the batch. We precompute this here to",
"context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim() self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim, combination=\"x,y\", num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size)",
"of a ``TextField`` representing the text of the document. spans",
"= 0.2, pretrain_ed: bool = False, pretrain_coref: bool = False,",
"the document should have no valid antecedents. Has shape ``(1,",
": ``torch.FloatTensor``, required. Embedding representations of the top spans. Has",
"antecedent. Each span can only # have prior spans as",
"\"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\": self._type_loss_metric.get_metric(reset), \"c_l\": self._coref_loss_metric.get_metric(reset),",
"num_total_buckets=self._num_distance_buckets) # (1, event_type) label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size)) # Shape:",
"\"a_f1\": (mention_result['f1-score'] + coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,",
"max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents)",
"broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices = target_indices -",
"the most likely antecedent. -1 means there was no predicted",
"2 * encoding_dim + feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings],",
"InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None: super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer)",
"= top_span_mention_scores.detach() # Now that we have our variables in",
"IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if",
"in the batch. Parameters ---------- output_dict : ``Dict[str, torch.Tensor]``, required.",
"``Seq2SeqEncoder`` This layer incorporates contextual information for each word in",
"is applied to the span representations which is then scored",
"return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): \"\"\" Converts",
"controls what percentage of candidate mention spans we retain with",
"(batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size) #",
"torch.FloatTensor): \"\"\" event_embeddings: ``torch.FloatTensor``, required. Embedding representations of the event",
"__init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, mention_feedforward: FeedForward, antecedent_feedforward: FeedForward, feature_size:",
"the # span representations that we generate from these indices,",
"we do is construct a matrix mapping a span's #",
"have no valid antecedents. Has shape ``(1, num_spans_to_keep, max_antecedents)``. \"\"\"",
"we need to # compare span pairs to decide each",
"at the zeroth position, which represents the prediction that a",
"= 'dot', decoding: str = 'type-guided', type_threshold: float = -1.,",
"a binary indicator for every pair of spans. This label",
"in document order, so we can just use the relative",
"= vocab.get_vocab_size('labels') - 1 # 10 possible distance buckets. self._num_distance_buckets",
"= 1.0, bce_loss_weight: float = 1.0, bce_pos_weight: float = None,",
"matrix mapping a span's # index to the indices of",
"of the batch dimension - it's just a function of",
"return {\"c_p\": coref_precision, \"c_r\": coref_recall, \"c_f1\": coref_f1, \"m_p\": mention_result['precision'], \"m_r\":",
"to consider as mentions. # Shape: (batch_size, num_spans_to_keep, 2) top_spans",
"shapes # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which # we",
"the list of spans and predicted antecedent indices into clusters",
"span, the negative marginal log likelihood of all antecedents which",
"only consider up to max_antecedents # prior spans. So the",
"of spans. Has shape (batch_size, num_spans_to_keep, max_antecedents, encoding_dim) top_span_mention_scores: ``torch.FloatTensor``,",
"top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings, span_mask, num_spans_to_keep_according_doc_len, ) event_embeddings =",
"self._distance_embedding = Embedding(self._num_distance_buckets, feature_size) self._coref_loss_weight = coref_loss_weight self._bce_loss_weight = bce_loss_weight",
"self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list += [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) #",
"antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the antecedent spans",
"per span which survived the pruning stage. This procedure is",
"event_embeddings.size(1), antecedent_embeddings.size(3),)) return torch.cat([event_embeddings, antecedent_embeddings], 2) def _compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor,",
"gold_label_spans_list = [m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, )",
"model to blame many of the absent links on bad",
"event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2)) shape = [event_prob.size(0), event_prob.size(1),",
"= torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1) event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)]) event_embeddings =",
"flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans) # Compute final predictions for which",
"to the model. context_layer : ``Seq2SeqEncoder`` This layer incorporates contextual",
"the sum of the probabilities of all antecedent predictions #",
"(batch_size, num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of",
"of considered spans (i.e not the word distance between the",
"num_spans_to_keep) # torch.index_select only accepts 1D indices, but here #",
"else: self._type_refine_gate = None # NIL for Unified Event self._event_embedding",
"pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor representing whether a given",
"= torch.triu(torch.tril(new_attention_mask, self._local_window_size), -self._local_window_size) new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings, new_attention_mask) return new_contextualized_embeddings",
"- refine_gate) * event_rep return top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask):",
"transitivity, imply a clustering of the spans in the document.",
"\"End-to-end Neural Coreference Resolution\" <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017.",
"each element can only predict previous spans, so this returns",
"text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device < 0: device =",
"self._endpoint_span_extractor = None self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0:",
"bce_loss.sum() * self._bce_loss_weight # Now, compute the loss using the",
"top_event_type_labels): \"\"\" :param top_event_type_labels: (batch, top_span_size, 1) :return: (batch, top_span_size,",
"(batch_size, num_spans), representing the event label of the specific span.",
"same output dictionary, but with an additional ``clusters`` key: clusters",
"F from allennlp.data import Vocabulary from allennlp.models.model import Model from",
"= util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1), event_indices.size(0)]) type_antecedent_labels =",
"survives the pruning stage, we consider this many antecedents. lexical_dropout:",
"Compute indices for antecedent spans to consider. max_antecedents = min(self._max_antecedents,",
"# consider a masked span. # Shape: (batch_size, num_spans, 2)",
"antecedent span is valid. Required since different spans have different",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings, valid_antecedent_indices)",
"not the word distance between the spans). Has shape ``(1,",
"representing the cluster ids of each span, or -1 for",
"shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to",
"num_spans_to_keep, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the",
"dummy label, the score is always zero. For the true",
"# because the offsets will be > the target indices.",
"Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log() #",
"= self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1) ) # Shape: (1, 1,",
"widths. max_span_width: ``int`` The maximum width of candidate spans. spans_per_word:",
"* top_embeddings + (1 - refine_gate) * event_rep return top_embeddings",
"= antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores",
"label, the score is always zero. For the true antecedent",
"= [antecedent_scores.size(0), antecedent_scores.size(1), 1] dummy_scores = antecedent_scores.new_zeros(*shape) # Shape: (batch_size,",
"et al., 2017. The basic outline of this model is",
"calls to util.batched_index_select below more efficient. flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)",
"(batch_size, num_spans, 2 * encoding_dim + feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings,",
"self._bce_loss_weight # Now, compute the loss using the negative marginal",
"realis_labels : ``torch.IntTensor``, optional (default = None). A tensor of",
"= 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs +",
"because there are none to select from. Similarly, each element",
"#!/usr/bin/env python # -*- coding:utf-8 -*- # Created by Roger",
"context_layer is not None: endpoint_span_extractor_dim = context_layer.get_output_dim() attentive_span_extractor_dim = text_field_embedder.get_output_dim()",
"offsets between each top span and its antecedent spans in",
"event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding representations of the",
"have any antecedent. Parameters ---------- top_span_labels : ``torch.IntTensor``, required. The",
"are minimising, for a # given span, the negative marginal",
"which survives the pruning stage, we consider this many antecedents.",
"self._realis_loss_metric = Average() self._coref_loss_metric = Average() self._coref_label_metric = Average() self._type_label_metric",
"= 'cpu' attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask =",
"required. A multiplier between zero and one which controls what",
"batch. We precompute this here to make # the multiple",
"max_antecedents). Returns ------- pairwise_labels_with_dummy_label : ``torch.FloatTensor`` A binary tensor representing",
"prior spans as antecedents, and we only consider up to",
"# Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets) # (1,",
"they are coreferent with. The resulting coreference links, after applying",
"``int`` The embedding size for all the embedded features, such",
"because in this case, it is possible we might #",
"# given span, the negative marginal log likelihood of all",
"a logspace mask here because we will eventually create a",
"\"\"\" This method generates possible antecedents per span which survived",
"the log of the sum of the probabilities of all",
"in the same # coreference cluster that would be valid",
"-1) return span_pair_embeddings def _compute_antecedent_gold_labels(self, top_span_labels: torch.IntTensor, type_antecedent_labels: torch.IntTensor, antecedent_labels:",
"for each element in the batch. Parameters ---------- output_dict :",
"(1 - pairwise_labels_indicator) self._coref_label_metric(torch.sum(pairwise_labels).item()) self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item()) self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1:",
"``torch.FloatTensor`` A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),",
"span's antecedent. Each span can only # have prior spans",
"---------- pairwise_embeddings: ``torch.FloatTensor``, required. Embedding representations of pairs of spans.",
"vocab.get_vocab_size('labels') - 1 # 10 possible distance buckets. self._num_distance_buckets =",
"text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans = spans.size(1) #",
"+= [endpoint_span_embeddings] span_embeddings = torch.cat(span_embeddings_list, -1) # event_scores = self._event_classifier.forward(span_embeddings)",
"of the distribution. # Shape: (1, num_spans_to_keep, max_antecedents) valid_antecedent_log_mask =",
"<= 0: self._attention_layer = None else: if self._attention_type == 'dot':",
"coreferent with. The resulting coreference links, after applying transitivity, imply",
"candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores, candidate_antecedent_mention_scores], -1) # Shape: (batch_size, num_spans_to_keep, 1",
"event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores, valid_antecedent_log_mask) #",
"0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()",
"every span. The id is arbitrary, as we just care",
"2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask) correct_antecedent_log_probs = coreference_log_probs",
"any antecedents, because there are none to select from. Similarly,",
"This reformats the indices to take into account their #",
"for pairwise_labels without type_antecedent_labels pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float()",
"spans in the document. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder",
"a matrix of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index",
"1 + event_type_size + max_antecedents) coreference_scores = self._compute_coreference_scores(span_pair_embeddings, top_scores, candidate_antecedent_mention_scores,",
"of spans, because in this case, it is possible we",
"to use. Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The indices of",
"# Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)",
"original document. \"\"\" return node_decode(output_dict, self.vocab, decoding_algorithm=self._decoding, positive_label_size=self._positive_label_size, type_threshold=self._type_threshold) @overrides",
"This is a broadcasted subtraction. # Shape: (num_spans_to_keep, max_antecedents) raw_antecedent_indices",
"EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from allennlp.nn import util, InitializerApplicator,",
"= torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor,",
"self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep) #",
"Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(",
"in turn comprised of a list of (start, end) inclusive",
"the antecedent spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings",
"self._positive_label_size = vocab.get_vocab_size('labels') - 1 # 10 possible distance buckets.",
"nested list, representing, for each instance in the batch, the",
"antecedent to consider with respect to the top k spans.",
"> the target indices. We want to mask these, #",
"= (target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float() pairwise_labels",
"in metadata] output_dict[\"offset\"] = [x[\"token_offset\"] for x in metadata] output_dict['doc_id']",
"pairwise_labels * 0 else: # for pairwise_labels without type_antecedent_labels pairwise_labels_indicator",
"0. This is only relevant in edge cases where #",
"a ``ListField[SpanField]`` of indices into the text of the document.",
"we attend over the # span representations that we generate",
"embedding_size) \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings =",
"metadata] return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]): \"\"\"",
"spans for the pairwise scoring function to consider. This includes",
"decides which antecedent span (if any) they are coreferent with.",
"of the possible antecedents the model considered. predicted_antecedents : ``torch.IntTensor``",
"spans : ``torch.IntTensor``, required. A tensor of shape (batch_size, num_spans,",
"spans.size(1) # Shape: (batch_size, document_length) text_mask = util.get_text_field_mask(text).float() # Shape:",
"1.0, bce_pos_weight: float = None, local_window_size: int = 10, attention_type:",
"turn comprised of a list of (start, end) inclusive spans",
"the event label of the specific span. realis_labels : ``torch.IntTensor``,",
"(batch_size, document_length, embedding_size) text_embeddings = self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans",
"= -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"] = coref_loss",
"to get embeddings # for all valid antecedents for each",
"linear layer. feature_size: ``int`` The embedding size for all the",
"and \"clusters\" keys from this dictionary, which respectively have the",
"document. Parameters ---------- vocab : ``Vocabulary`` text_field_embedder : ``TextFieldEmbedder`` Used",
"= BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight)) else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout >",
"- 1) - j if j <= i, or zero",
"= -1., type_refine: bool = True, type_match_in_eval: bool = True,",
"_, predicted_antecedents = coreference_scores.max(2) # Subtract one here because index",
"mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size +",
"num_spans) span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float() # SpanFields",
"span and each of its antecedents in terms of the",
"span (if any) they are coreferent with. The resulting coreference",
"for those which do not appear in any clusters. event_type_labels",
"antecedent) pair we considered. \"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)),",
"num_spans_to_keep, event_type_size + max_antecedents, embedding_size) candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings( event_embeddings, candidate_antecedent_embeddings)",
"only predict previous spans, so this returns a matrix of",
"layer. feature_size: ``int`` The embedding size for all the embedded",
"-1) event_prob = torch.softmax(event_prob, -1) event_rep = torch.bmm(event_prob[:, :, 1:],",
"the mask for valid antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A",
"mention_result['recall'], \"m_f1\": mention_result['f1-score'], \"nil\": self._nil_label_metric.get_metric(reset), \"type\": self._type_label_metric.get_metric(reset), \"coref\": self._coref_label_metric.get_metric(reset), \"t_l\":",
"torch.cat([type_antecedent_labels, pairwise_labels], -1) return pairwise_labels_with_dummy_label def _compute_coreference_scores(self, pairwise_embeddings: torch.FloatTensor, top_span_mention_scores:",
"# All antecedent mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:,",
"# event_realis_scores = self._event_realis_classifier.forward(span_embeddings) # Prune based on mention scores.",
"between the span and each of its antecedents in terms",
"flat_top_span_indices) antecedent_labels = util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute",
"from allennlp.nn import util, InitializerApplicator, RegularizerApplicator from allennlp.training.metrics import Average",
"top_embeddings def _local_attention(self, raw_contextualized_embeddings, text_mask): device = util.get_device_of(raw_contextualized_embeddings) if device",
"= TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type = attention_type self._decoding",
"valid_antecedent_offsets) # (batch_size, event_type_size, 1) event_type_prior_scores = self._event_scorer(event_embeddings) # (batch_size,",
"Dict[str, torch.Tensor]): \"\"\" Converts the list of spans and predicted",
"+ 1]).item()) # print(pairwise_labels) # # # Shape: (batch_size, num_spans_to_keep,",
"1st span _cannot_ have any antecedents, because there are none",
"self._coref_loss_metric.get_metric(reset), \"a_f1\": (mention_result['f1-score'] + coref_f1) / 2.} @staticmethod def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings:",
"# distribution over these indices, so we need the 0",
"max_antecedents + event_type_size, embedding_size) antecedent_distance_embeddings = self._distance_embedding( torch.cat([bucket_values, label_bucket_values], 1)",
"Pruner(self._event_scorer) self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1)) self._local_window_size = local_window_size self._attention_type =",
"= torch.ones((text_mask.size(1), text_mask.size(1)), device=device) # attention_mask = attention_mask - torch.eye(text_mask.size(1),",
"every antecedent. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_log_mask: ``torch.FloatTensor``, required.",
"pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model`` implements the",
"are unlikely to occur in a coreference cluster. For the",
"by Lee et al., 2017. The basic outline of this",
"span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim() else: span_embedding_size = self._attentive_span_extractor.get_output_dim() if",
"for every span. Has shape (batch_size, num_spans_to_keep, max_antecedents). antecedent_mention_scores: ``torch.FloatTensor``,",
"\"\"\" antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0), antecedent_log_mask.size(1), self._positive_label_size)), antecedent_log_mask], -1) # Shape:",
"antecedent_offsets: torch.FloatTensor): \"\"\" Computes an embedding representation of pairs of",
"num_spans), representing the realis label of the specific span. metadata",
"span representations that we generate from these indices, we #",
"similarity of the span representations, and an embedding representation of",
"= util.get_text_field_mask(text).float() # Shape: (batch_size, num_spans) span_mask = (spans[:, :,",
"pairwise features, which is then scored by a linear layer.",
"the spans to know which other spans are allowed antecedents.",
"we just care about the clustering. Has shape (batch_size, num_spans_to_keep).",
"x: x initializer(self) def _get_event_embedding(self, span_mask): \"\"\" :param span_mask: (batch,",
"antecedent spans. # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) candidate_antecedent_embeddings =",
"the unary mention scores for the span and its antecedent.",
"a linear layer. feature_size: ``int`` The embedding size for all",
"\"\"\" :param top_event_type_labels: (batch, top_span_size, 1) :return: (batch, top_span_size, positive_label_size)",
"the number of spans we consider after the pruning stage",
"spans we are considering for each top span. Has shape",
"coreferent with anything. For the dummy label, the score is",
"antecedent span (if any) they are coreferent with. The resulting",
"so we can just use the relative # index of",
"Has shape ``(1, num_spans_to_keep, max_antecedents)``. \"\"\" # Shape: (num_spans_to_keep, 1)",
"label for every antecedent span. The id is arbitrary, as",
"= top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings) # Shape: (1, max_antecedents) bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)",
"None else: if self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head",
"event_embeddings = self._event_embedding_map.forward(event_embeddings) event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0), event_embeddings.size(0), event_embeddings.size(1), ) return",
"the model considered. predicted_antecedents : ``torch.IntTensor`` A tensor of shape",
"(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings = torch.cat([target_embeddings, antecedent_embeddings,",
"antecedents. Returns ------- coreference_scores: ``torch.FloatTensor`` A tensor of shape (batch_size,",
"util.flattened_index_select(top_embeddings, valid_antecedent_indices) # Shape: (batch_size, num_spans_to_keep, max_antecedents) candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,",
"= util.get_range_vector(num_spans_to_keep, device).unsqueeze(1) # Shape: (1, max_antecedents) valid_antecedent_offsets = (util.get_range_vector(max_antecedents,",
"num_spans_to_keep, max_antecedents + event_type_size, embedding_size) \"\"\" event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0), antecedent_embeddings.size(1),",
"is not None: new_contextualized_embeddings = self._local_attention( raw_contextualized_embeddings=raw_contextualized_embeddings, text_mask=text_mask ) else:",
"Has shape (batch_size, event_type_size, embedding_size). antecedent_embeddings : ``torch.FloatTensor``, required. Embedding",
"\"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size) target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)",
"batch, the 1st span _cannot_ have any antecedents, because there",
"their # index into the batch. We precompute this here",
"= 'type-guided', type_threshold: float = -1., type_refine: bool = True,",
">= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long() return",
"``FeedForward`` This feedforward network is applied to pairs of span",
"spans. spans_per_word: float, required. A multiplier between zero and one",
"span representations, the element-wise similarity of the span representations, and",
"are in turn comprised of a list of (start, end)",
"---------- text : ``Dict[str, torch.LongTensor]``, required. The output of a",
"representations of the antecedent spans we are considering for each",
"antecedent_feedforward: FeedForward, feature_size: int, context_layer: Seq2SeqEncoder = None, max_span_width: int",
"from overrides import overrides from torch.nn import BCEWithLogitsLoss from src.metrics.event_coref_scores",
"-*- # Created by Roger on 2019-09-10 # Mostly by",
"its antecedent. The factoring allows the model to blame many",
"a clustering if we group # mentions which refer to",
"= self._event_classifier.forward(span_embeddings) # Shape: (batch_size, num_spans, num_event_realis_label) # Shape: (batch_size,",
"+ feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans,",
"for each span which survived the pruning stage, # a",
"A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top",
"positive_label_size) \"\"\" event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'), device=util.get_device_of(top_event_type_labels)) top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0), top_event_type_labels.size(1),",
"pred_label_spans_list, gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is not",
"span. The id is arbitrary, as we just care about",
"Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long() return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask",
"else: self._bce_loss = BCEWithLogitsLoss(reduction='none') if lexical_dropout > 0: self._lexical_dropout =",
"the span's position in # top_spans. The spans are in",
"of the spans to know which other spans are allowed",
"appear in any clusters. event_type_labels : ``torch.IntTensor``, optional (default =",
"embedding_size). return: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) \"\"\" event_embeddings",
"\"\"\" Generates a binary indicator for every pair of spans.",
"local_window_size self._attention_type = attention_type self._decoding = decoding self._type_threshold = type_threshold",
"the prediction that a span does not have any antecedent.",
"= attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings)) new_attention_mask = text_mask[:, :,",
"such as distances or span widths. max_span_width: ``int`` The maximum",
"``List[Dict[str, Any]]``, optional (default = None). A metadata dictionary for",
"we retain with respect to the number of words in",
"if lexical_dropout > 0: self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout) else: self._lexical_dropout =",
"optional (default=``InitializerApplicator()``) Used to initialize the model parameters. regularizer :",
"coref_labels : ``torch.IntTensor``, optional (default = None). A tensor of",
"the first span in the document should have no valid",
"be consistent with the data, in the sense that we",
"num_spans, embedding_size + 2 * encoding_dim + feature_size) # span_embeddings",
"for all the embedded features, such as distances or span",
"(batch_size, num_spans_to_keep). antecedent_labels : ``torch.IntTensor``, required. The cluster id label",
"index of the spans to know which other spans are",
"num_spans_to_keep)`` representing, for each top span, the index (with respect",
"import FeedForward, Pruner from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder from",
"pairwise antecedent score and the unary mention scores for the",
"embedding_size) span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings, candidate_antecedent_embeddings, valid_antecedent_offsets) # (batch_size, event_type_size, 1)",
"antecedent_scores.new_zeros(*shape) # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) coreference_scores =",
"Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)",
"remaining spans, the model decides which antecedent span (if any)",
"feature_size) endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size)",
"is the sum of the # probability assigned to all",
"A scalar loss to be optimised. \"\"\" # Shape: (batch_size,",
"= event_type_prior_scores.transpose(1, 2).expand( candidate_antecedent_mention_scores.size(0), candidate_antecedent_mention_scores.size(1), -1) # (batch_size, num_spans_to_keep, event_type_size",
"text_mask.size(1)), device=device) # attention_mask = attention_mask - torch.eye(text_mask.size(1), # device=util.get_device_of(contextualized_embeddings))",
":, self._positive_label_size + 2:] = 0 coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)",
"the batch, the 1st span _cannot_ have any antecedents, because",
"feedforward network is applied to the span representations which is",
"element in the batch. # This reformats the indices to",
"span_width_embedding_dim=feature_size) self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if",
"a coreference cluster. For the remaining spans, the model decides",
"``text`` ``TextField`` we get as input to the model. context_layer",
"For each mention which survives the pruning stage, we consider",
"similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim = text_field_embedder.get_output_dim() if max_span_width",
"Now, compute the loss using the negative marginal log-likelihood. #",
"= SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim() if self._local_window_size <=",
"= self._lexical_dropout(self._text_field_embedder(text)) document_length = text_embeddings.size(1) num_spans = spans.size(1) # Shape:",
"self._attention_type) self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim, similarity_function=similarity_function, combination='2', num_attention_heads=num_head ) else: attentive_span_extractor_dim",
"Compute antecedent scores. # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents,",
"an embedding representation of the distance between the two spans.",
"<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83> by Lee et al., 2017. The basic outline of",
"encoding_dim + feature_size) # span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings",
"bce_loss = bce_loss.sum() * self._bce_loss_weight # Now, compute the loss",
"CUDA device to use. Returns ------- valid_antecedent_indices : ``torch.IntTensor`` The",
"(target_labels == antecedent_labels).float() non_dummy_indicator = (target_labels >= 0).float() pairwise_labels =",
"gold_label_spans_list) self._type_loss_metric(bce_loss.item()) self._coref_loss_metric(negative_marginal_log_likelihood.item()) else: self._coref_loss_metric(0.) if metadata is not None:",
"logger.info(vocab) self._text_field_embedder = text_field_embedder self._context_layer = context_layer self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)",
"document. coref_labels : ``torch.IntTensor``, optional (default = None). A tensor",
"(batch_size, num_spans) span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float() #",
"F.relu(spans.float()).long() if self._context_layer: # Shape: (batch_size, document_length, encoding_dim) raw_contextualized_embeddings =",
"self._endpoint_span_extractor(new_contextualized_embeddings, spans) # Shape: (batch_size, num_spans, embedding_size) attended_span_embeddings = self._attentive_span_extractor(text_embeddings,",
"negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"] = coref_loss + bce_loss decoded_result =",
"+= antecedent_log_mask # Shape: (batch_size, num_spans_to_keep, 1) shape = [antecedent_scores.size(0),",
"in this case, it is possible we might # consider",
"= self._get_event_embedding(span_mask) top_mask = top_mask.unsqueeze(-1) # Shape: (batch_size * num_spans_to_keep)",
"resulting coreference links, after applying transitivity, imply a clustering of",
"span does not have any antecedent. Parameters ---------- top_span_labels :",
"[m['gold_label_spans'] for m in metadata] self._mention_f1_score(pred_label_spans_list, gold_label_spans_list, ) self._conll_coref_scores(decoded_result['clusters'], metadata,",
"float = 0.2, pretrain_ed: bool = False, pretrain_coref: bool =",
"``torch.FloatTensor`` A binary tensor representing whether a given pair of",
"IntraSentenceAttentionEncoder from allennlp.modules.similarity_functions import DotProductSimilarity from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor",
"top span the index (with respect to top_spans) of the",
"span. This gives us variables with shapes # like (batch_size,",
"and event_type_labels is not None: pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)",
"possible antecedents per span which survived the pruning stage. This",
"representation of each span in the document. These span representations",
"_compute_span_pair_embeddings(self, top_span_embeddings: torch.FloatTensor, antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\" Computes an",
"out dimensions of the embedded text. initializer : ``InitializerApplicator``, optional",
"considering. Each span i predicts a # single antecedent j,",
"if the pair of spans belong to the same cluster.",
"# print(antecedent_labels) target_labels = top_span_labels.expand_as(antecedent_labels) same_cluster_indicator = (target_labels == antecedent_labels).float()",
"The basic outline of this model is to get an",
"(batch_size, num_spans_to_keep, max_antecedents, embedding_size) \"\"\" # Shape: (batch_size, num_spans_to_keep, max_antecedents,",
"of the absent links on bad spans, enabling the pruning",
"SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim) if self._local_window_size <= 0: self._attention_layer = None else: if",
"negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum() coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight output_dict[\"loss\"] =",
"the number of considered spans (i.e not the word distance",
"as F from allennlp.data import Vocabulary from allennlp.models.model import Model",
"= util.flattened_index_select(pruned_gold_labels, valid_antecedent_indices).squeeze(-1) antecedent_labels += valid_antecedent_log_mask.long() # Compute labels. #",
"float = -1., type_refine: bool = True, type_match_in_eval: bool =",
"initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None) -> None:",
"-*- coding:utf-8 -*- # Created by Roger on 2019-09-10 #",
"dictionary, which respectively have the original text and the annotated",
"= (raw_antecedent_indices >= 0).float().unsqueeze(0).log() # Shape: (num_spans_to_keep, max_antecedents) valid_antecedent_indices =",
"= util.flattened_index_select(top_scores, valid_antecedent_indices).squeeze(-1) # Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents,",
"self._attention_type == 'dot': similarity_function = DotProductSimilarity(scale_output=True) num_head = 1 else:",
"in the document. These span representations are scored and used",
"logging.getLogger(__name__) # pylint: disable=invalid-name @Model.register(\"end-to-end-event-coreference\") class End2EndEventCoreferenceResolver(Model): \"\"\" This ``Model``",
"span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1) span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)",
"that survived the pruning stage. antecedent_indices : ``torch.IntTensor`` A tensor",
"whether each antecedent span is valid. Required since different spans",
"antecedents which are in the # same gold cluster as",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size) span_pair_embeddings =",
"Has shape ``(1, max_antecedents)``. valid_antecedent_log_mask : ``torch.FloatTensor`` The logged mask",
"None). A tensor of shape (batch_size, num_spans), representing the realis",
"these, # because these are exactly the indices which we",
"Generates a binary indicator for every pair of spans. This",
"-inf # in order to not mess up the normalisation",
"in any clusters. event_type_labels : ``torch.IntTensor``, optional (default = None).",
"model considered. predicted_antecedents : ``torch.IntTensor`` A tensor of shape ``(batch_size,",
"# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1) gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,",
"max_antecedents: int = 50, lexical_dropout: float = 0.2, pretrain_ed: bool",
"thing we do is construct a matrix mapping a span's",
"calculate the regularization penalty during training. \"\"\" def __init__(self, vocab:",
"Prune based on mention scores. num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))",
"# mentions which refer to each other in a chain.",
"antecedent mask is 0 top_mask = top_mask.expand_as(coreference_scores).clone() top_mask[:, :, self._positive_label_size",
"a given pair of spans belong to the same cluster",
"all valid antecedents. This is a valid objective for #",
"percentage of candidate mention spans we retain with respect to",
"span in the document. These span representations are scored and",
"for that instance. Returns ------- An output dictionary consisting of:",
"* 0 else: # for pairwise_labels without type_antecedent_labels pairwise_labels_indicator =",
"by AllenNLP import logging import math from typing import Any,",
"antecedent_embeddings: torch.FloatTensor, antecedent_offsets: torch.FloatTensor): \"\"\" Computes an embedding representation of",
"Shape: (batch_size, num_spans, embedding_size) endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans) span_embeddings_list +=",
"SelfAttentiveSpanExtractor, EndpointSpanExtractor from allennlp.modules.token_embedders import Embedding from allennlp.nn import util,",
"if device < 0: device = 'cpu' attention_mask = torch.ones((text_mask.size(1),"
] |
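
This row's fragments come from an AllenNLP-style end-to-end event coreference model. As a rough orientation only, below is a minimal PyTorch sketch of the gold-antecedent-label construction that the snippets around `target_labels`, `same_cluster_indicator`, and the `(batch_size, num_spans_to_keep, max_antecedents + 1)` shape allude to. It follows the standard AllenNLP coreference recipe and deliberately omits the event-type pseudo-antecedent columns (`event_type_size + max_antecedents`) this particular model adds, so treat it as an illustrative sketch rather than the model's exact code.

```python
import torch


def compute_antecedent_gold_labels(top_span_labels, antecedent_labels):
    """Binary gold labels for (span, antecedent) pairs plus a dummy
    "no antecedent" column.

    top_span_labels:   (batch, k, 1) gold cluster id per kept span, -1 if the
                       span is not in any gold cluster.
    antecedent_labels: (batch, k, max_antecedents) cluster id of each candidate
                       antecedent; invalid antecedents are assumed to already be
                       masked to a negative value that can never match.
    returns:           (batch, k, max_antecedents + 1)
    """
    # Broadcast each span's own cluster id against all of its candidates.
    target_labels = top_span_labels.expand_as(antecedent_labels)
    same_cluster_indicator = (target_labels == antecedent_labels).float()
    # Spans outside any gold cluster (-1) never produce positive pairs.
    non_dummy_indicator = (target_labels >= 0).float()
    pairwise_labels = same_cluster_indicator * non_dummy_indicator
    # The dummy label is 1 only when no real antecedent matched.
    dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
    return torch.cat([dummy_labels, pairwise_labels], -1)
```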
[
"= [[0, 0, 0]] CAPACITIES = (3, 5, 9) solutions_count",
"current_state[j], current_state[i]) new_state[j] += liters_change new_state[i] -= liters_change if new_state",
"= list(current_state) new_state[i] = CAPACITIES[i] if new_state not in current_path:",
"solutions to get 7 liters \"\"\" current_path = [[0, 0,",
"\"__main__\": try: current_state = [0, 0, 0] move_to_new_state(current_state) print(solutions_count) except",
"= CAPACITIES[i] if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop()",
"j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]: new_state",
"current_state = [0, 0, 0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count)",
"if 7 in current_state: solutions_count += 1 else: # Empty",
"and current_state[j] != CAPACITIES[j]: new_state = list(current_state) liters_change = min(CAPACITIES[j]",
"Empty bottle for i in range(3): if current_state[i] != 0:",
"possible solutions to get 7 liters \"\"\" current_path = [[0,",
"if current_state[i] != 0: new_state = list(current_state) new_state[i] = 0",
"solutions_count = 0 def move_to_new_state(current_state): global solutions_count, current_path if 7",
"solutions_count, current_path if 7 in current_state: solutions_count += 1 else:",
"0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count) # Result: at least",
"9 liters, count number of all possible solutions to get",
"= 0 if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop()",
"liters \"\"\" current_path = [[0, 0, 0]] CAPACITIES = (3,",
"bottle to another for i in range(3): for j in",
"0 if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() #",
"count number of all possible solutions to get 7 liters",
"new_state[i] = CAPACITIES[i] if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state)",
"if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill",
"move_to_new_state(new_state) current_path.pop() # Pour from one bottle to another for",
"current_path if 7 in current_state: solutions_count += 1 else: #",
"for i in range(3): for j in range(3): if i",
"current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill bottle for i in",
"not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour from one",
"!= 0 and current_state[j] != CAPACITIES[j]: new_state = list(current_state) liters_change",
"if __name__ == \"__main__\": try: current_state = [0, 0, 0]",
"+= liters_change new_state[i] -= liters_change if new_state not in current_path:",
"current_path.pop() if __name__ == \"__main__\": try: current_state = [0, 0,",
"i != j and current_state[i] != 0 and current_state[j] !=",
"min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j] += liters_change new_state[i] -= liters_change",
"in current_state: solutions_count += 1 else: # Empty bottle for",
"current_path.pop() # Pour from one bottle to another for i",
"liters_change if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if",
"9) solutions_count = 0 def move_to_new_state(current_state): global solutions_count, current_path if",
"current_state[j] != CAPACITIES[j]: new_state = list(current_state) liters_change = min(CAPACITIES[j] -",
"0, 0]] CAPACITIES = (3, 5, 9) solutions_count = 0",
"liters, count number of all possible solutions to get 7",
"and 9 liters, count number of all possible solutions to",
"!= CAPACITIES[j]: new_state = list(current_state) liters_change = min(CAPACITIES[j] - current_state[j],",
"not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill bottle for",
"1 else: # Empty bottle for i in range(3): if",
"liters_change new_state[i] -= liters_change if new_state not in current_path: current_path.append(new_state)",
"of capacities 3, 5, and 9 liters, count number of",
"range(3): if current_state[i] != CAPACITIES[i]: new_state = list(current_state) new_state[i] =",
"(3, 5, 9) solutions_count = 0 def move_to_new_state(current_state): global solutions_count,",
"number of all possible solutions to get 7 liters \"\"\"",
"not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__ == \"__main__\":",
"= min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j] += liters_change new_state[i] -=",
"all possible solutions to get 7 liters \"\"\" current_path =",
"# Fill bottle for i in range(3): if current_state[i] !=",
"new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__ ==",
"if current_state[i] != CAPACITIES[i]: new_state = list(current_state) new_state[i] = CAPACITIES[i]",
"if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__",
"j in range(3): if i != j and current_state[i] !=",
"CAPACITIES = (3, 5, 9) solutions_count = 0 def move_to_new_state(current_state):",
"current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour from one bottle to another",
"def move_to_new_state(current_state): global solutions_count, current_path if 7 in current_state: solutions_count",
"in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill bottle for i",
"0 and current_state[j] != CAPACITIES[j]: new_state = list(current_state) liters_change =",
"move_to_new_state(new_state) current_path.pop() if __name__ == \"__main__\": try: current_state = [0,",
"move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count) # Result: at least 44900799",
"of all possible solutions to get 7 liters \"\"\" current_path",
"current_path = [[0, 0, 0]] CAPACITIES = (3, 5, 9)",
"another for i in range(3): for j in range(3): if",
"current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour from one bottle to",
"to get 7 liters \"\"\" current_path = [[0, 0, 0]]",
"get 7 liters \"\"\" current_path = [[0, 0, 0]] CAPACITIES",
"in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour from one bottle",
"5, and 9 liters, count number of all possible solutions",
"global solutions_count, current_path if 7 in current_state: solutions_count += 1",
"\"\"\" current_path = [[0, 0, 0]] CAPACITIES = (3, 5,",
"CAPACITIES[i] if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() #",
"!= CAPACITIES[i]: new_state = list(current_state) new_state[i] = CAPACITIES[i] if new_state",
"<filename>week2/7litersProblem.py \"\"\" Given 3 bottles of capacities 3, 5, and",
"range(3): if current_state[i] != 0: new_state = list(current_state) new_state[i] =",
"in range(3): if i != j and current_state[i] != 0",
"+= 1 else: # Empty bottle for i in range(3):",
"if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour",
"7 in current_state: solutions_count += 1 else: # Empty bottle",
"from one bottle to another for i in range(3): for",
"print(solutions_count) except KeyboardInterrupt: print(solutions_count) # Result: at least 44900799 solution",
"= 0 def move_to_new_state(current_state): global solutions_count, current_path if 7 in",
"!= 0: new_state = list(current_state) new_state[i] = 0 if new_state",
"new_state = list(current_state) new_state[i] = CAPACITIES[i] if new_state not in",
"try: current_state = [0, 0, 0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt:",
"in range(3): if current_state[i] != 0: new_state = list(current_state) new_state[i]",
"current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__ == \"__main__\": try: current_state =",
"CAPACITIES[i]: new_state = list(current_state) new_state[i] = CAPACITIES[i] if new_state not",
"5, 9) solutions_count = 0 def move_to_new_state(current_state): global solutions_count, current_path",
"Fill bottle for i in range(3): if current_state[i] != CAPACITIES[i]:",
"else: # Empty bottle for i in range(3): if current_state[i]",
"# Empty bottle for i in range(3): if current_state[i] !=",
"in range(3): for j in range(3): if i != j",
"[0, 0, 0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count) # Result:",
"current_state[i] != 0 and current_state[j] != CAPACITIES[j]: new_state = list(current_state)",
"0: new_state = list(current_state) new_state[i] = 0 if new_state not",
"move_to_new_state(new_state) current_path.pop() # Fill bottle for i in range(3): if",
"current_state: solutions_count += 1 else: # Empty bottle for i",
"3 bottles of capacities 3, 5, and 9 liters, count",
"one bottle to another for i in range(3): for j",
"current_state[i] != CAPACITIES[i]: new_state = list(current_state) new_state[i] = CAPACITIES[i] if",
"[[0, 0, 0]] CAPACITIES = (3, 5, 9) solutions_count =",
"i in range(3): if current_state[i] != 0: new_state = list(current_state)",
"current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill bottle for i in range(3):",
"!= j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:",
"liters_change = min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j] += liters_change new_state[i]",
"bottle for i in range(3): if current_state[i] != 0: new_state",
"in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__ == \"__main__\": try:",
"i in range(3): if current_state[i] != CAPACITIES[i]: new_state = list(current_state)",
"= [0, 0, 0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count) #",
"new_state = list(current_state) liters_change = min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j]",
"0 def move_to_new_state(current_state): global solutions_count, current_path if 7 in current_state:",
"7 liters \"\"\" current_path = [[0, 0, 0]] CAPACITIES =",
"new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Fill bottle",
"= list(current_state) liters_change = min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j] +=",
"0]] CAPACITIES = (3, 5, 9) solutions_count = 0 def",
"= (3, 5, 9) solutions_count = 0 def move_to_new_state(current_state): global",
"3, 5, and 9 liters, count number of all possible",
"list(current_state) new_state[i] = CAPACITIES[i] if new_state not in current_path: current_path.append(new_state)",
"range(3): for j in range(3): if i != j and",
"range(3): if i != j and current_state[i] != 0 and",
"bottles of capacities 3, 5, and 9 liters, count number",
"Given 3 bottles of capacities 3, 5, and 9 liters,",
"new_state[i] = 0 if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state)",
"i in range(3): for j in range(3): if i !=",
"list(current_state) new_state[i] = 0 if new_state not in current_path: current_path.append(new_state)",
"solutions_count += 1 else: # Empty bottle for i in",
"== \"__main__\": try: current_state = [0, 0, 0] move_to_new_state(current_state) print(solutions_count)",
"# Pour from one bottle to another for i in",
"bottle for i in range(3): if current_state[i] != CAPACITIES[i]: new_state",
"and current_state[i] != 0 and current_state[j] != CAPACITIES[j]: new_state =",
"list(current_state) liters_change = min(CAPACITIES[j] - current_state[j], current_state[i]) new_state[j] += liters_change",
"0, 0] move_to_new_state(current_state) print(solutions_count) except KeyboardInterrupt: print(solutions_count) # Result: at",
"capacities 3, 5, and 9 liters, count number of all",
"= list(current_state) new_state[i] = 0 if new_state not in current_path:",
"current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() if __name__ == \"__main__\": try: current_state",
"__name__ == \"__main__\": try: current_state = [0, 0, 0] move_to_new_state(current_state)",
"for i in range(3): if current_state[i] != 0: new_state =",
"- current_state[j], current_state[i]) new_state[j] += liters_change new_state[i] -= liters_change if",
"if i != j and current_state[i] != 0 and current_state[j]",
"new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop() # Pour from",
"to another for i in range(3): for j in range(3):",
"current_path.pop() # Fill bottle for i in range(3): if current_state[i]",
"-= liters_change if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state) current_path.pop()",
"\"\"\" Given 3 bottles of capacities 3, 5, and 9",
"for i in range(3): if current_state[i] != CAPACITIES[i]: new_state =",
"new_state[i] -= liters_change if new_state not in current_path: current_path.append(new_state) move_to_new_state(new_state)",
"current_state[i] != 0: new_state = list(current_state) new_state[i] = 0 if",
"Pour from one bottle to another for i in range(3):",
"move_to_new_state(current_state): global solutions_count, current_path if 7 in current_state: solutions_count +=",
"in range(3): if current_state[i] != CAPACITIES[i]: new_state = list(current_state) new_state[i]",
"CAPACITIES[j]: new_state = list(current_state) liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])",
"current_state[i]) new_state[j] += liters_change new_state[i] -= liters_change if new_state not",
"new_state[j] += liters_change new_state[i] -= liters_change if new_state not in",
"new_state = list(current_state) new_state[i] = 0 if new_state not in",
"for j in range(3): if i != j and current_state[i]"
] |
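
The fragments in this row reassemble into a single short script (named `week2/7litersProblem.py` in the source): a depth-first search over bottle states. A consolidated version, pieced together from the overlapping shingles above, is:

```python
"""Given 3 bottles of capacities 3, 5, and 9 liters, count number of all
possible solutions to get 7 liters."""

current_path = [[0, 0, 0]]
CAPACITIES = (3, 5, 9)
solutions_count = 0


def move_to_new_state(current_state):
    global solutions_count, current_path
    if 7 in current_state:
        solutions_count += 1
    else:
        # Empty bottle
        for i in range(3):
            if current_state[i] != 0:
                new_state = list(current_state)
                new_state[i] = 0
                if new_state not in current_path:
                    current_path.append(new_state)
                    move_to_new_state(new_state)
                    current_path.pop()
        # Fill bottle
        for i in range(3):
            if current_state[i] != CAPACITIES[i]:
                new_state = list(current_state)
                new_state[i] = CAPACITIES[i]
                if new_state not in current_path:
                    current_path.append(new_state)
                    move_to_new_state(new_state)
                    current_path.pop()
        # Pour from one bottle to another
        for i in range(3):
            for j in range(3):
                if i != j and current_state[i] != 0 and current_state[j] != CAPACITIES[j]:
                    new_state = list(current_state)
                    liters_change = min(CAPACITIES[j] - current_state[j], current_state[i])
                    new_state[j] += liters_change
                    new_state[i] -= liters_change
                    if new_state not in current_path:
                        current_path.append(new_state)
                        move_to_new_state(new_state)
                        current_path.pop()


if __name__ == "__main__":
    try:
        current_state = [0, 0, 0]
        move_to_new_state(current_state)
        print(solutions_count)
    except KeyboardInterrupt:
        print(solutions_count)

# Result: at least 44900799 solution
```

Here `current_path` acts as the visited set for the path currently being explored, so the count enumerates loop-free move sequences that reach a state containing exactly 7 liters; the source records "at least 44900799" solutions, printed when the long-running search is interrupted.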
[
"down here. Also, update could # lead to removal of",
"Number of rules registered. :rtype: ``int`` \"\"\" # Register packs",
"pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file'] = metadata_file rule_api",
"the rules from the provided pack. :return: Number of rules",
"2.0 (the \"License\"); # you may not use this file",
"from pack: %s', rules_dir) return registered_count def register_from_pack(self, pack_dir): \"\"\"",
"return registered_count LOG.debug('Registering rules from pack %s:, dir: %s', pack,",
"= RuleAPI.to_model(rule_api) # Migration from rule without pack to rule",
"rules_dir) try: rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack, rules=rules) except",
"pack, rules_dir in six.iteritems(content): if not rules_dir: LOG.debug('Pack %s does",
"os import six from st2common import log as logging from",
"pack `default` # generated in migration script. In this case,",
"ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule %s in pack %s', content['name'],",
"e: if self._fail_on_failure: msg = ('Failed to register rule \"%s\"",
"pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir _, pack =",
"there was an existing rule then the ref count was",
"``int`` \"\"\" # Register packs first self.register_packs(base_dirs=base_dirs) registered_count = 0",
"not contain rules.', pack) continue try: LOG.debug('Registering rules from pack:",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"same name but in pack `default` # generated in migration",
"LOG.debug('Looking for rule %s in pack %s', content['name'], DEFAULT_PACK_NAME) existing",
"try: rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack, rules=rules) except Exception",
"LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref) Rule.delete(existing) except:",
"pack_field: content['pack'] = pack pack_field = pack if pack_field !=",
"registered_count LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir)",
"packs_base_paths = content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir: result",
"Register pack first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count = 0 if not",
"!= pack: raise Exception('Model is in pack \"%s\" but field",
"from st2common.models.api.rule import RuleAPI from st2common.models.system.common import ResourceReference from st2common.persistence.rule",
"return registered_count def register_from_pack(self, pack_dir): \"\"\" Register all the rules",
"for pack, rules_dir in six.iteritems(content): if not rules_dir: LOG.debug('Pack %s",
"language governing permissions and # limitations under the License. from",
"pack_dir=pack_dir) registered_count = 0 if not rules_dir: return registered_count LOG.debug('Registering",
"# If there was an existing rule then the ref",
"= pack pack_field = pack if pack_field != pack: raise",
"= self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack, rules=rules) except Exception as e:",
"with id: %s', rule_ref, existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule %s not",
"= 0 content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack, rules_dir in",
"pack_field != DEFAULT_PACK_NAME: try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for",
"use this file except in compliance with the License. #",
"count = self._register_rules_from_pack(pack, rules) registered_count += count except Exception as",
"st2common.bootstrap.base import ResourceRegistrar from st2common.models.api.rule import RuleAPI from st2common.models.system.common import",
"existing: rule_db.id = existing.id LOG.debug('Found existing rule: %s with id:",
"% (rule, pack, six.text_type(e))) raise ValueError(msg) LOG.exception('Failed registering rule from",
"existing) if existing: LOG.debug('Found rule in pack default: %s; Deleting.',",
"try: rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db': rule_db} LOG.audit('Rule",
"pack %s:, dir: %s', pack, rules_dir) try: rules = self._get_rules_from_pack(rules_dir=rules_dir)",
"monstrosity for rule in rules: LOG.debug('Loading rule from %s.', rule)",
"LOG.debug('Registering rules from pack %s:, dir: %s', pack, rules_dir) try:",
"`default` # generated in migration script. In this case, we",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"= [ 'RulesRegistrar', 'register_rules' ] LOG = logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar):",
"License. # You may obtain a copy of the License",
"except: LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME) try: rule_ref",
"= RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir) else: result",
"rule_db, rule, extra=extra) except Exception: LOG.exception('Failed to create rule %s.',",
"def register_from_packs(self, base_dirs): \"\"\" :return: Number of rules registered. :rtype:",
"under the License is distributed on an \"AS IS\" BASIS,",
"('Failed to register rule \"%s\" from pack \"%s\": %s' %",
"License for the specific language governing permissions and # limitations",
"pack=content['pack']) existing = Rule.get_by_ref(rule_ref) if existing: rule_db.id = existing.id LOG.debug('Found",
"updated in # to_model so it needs to be adjusted",
"first self.register_packs(base_dirs=base_dirs) registered_count = 0 content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for",
"= pack_dir[:-1] if pack_dir.endswith('/') else pack_dir _, pack = os.path.split(pack_dir)",
"%s in pack %s', content['name'], DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing",
"DEFAULT_PACK_NAME from st2common.bootstrap.base import ResourceRegistrar from st2common.models.api.rule import RuleAPI from",
"= ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing = Rule.get_by_ref(rule_ref) if existing: rule_db.id =",
"[ 'RulesRegistrar', 'register_rules' ] LOG = logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS",
"Deleting.', rule_ref) Rule.delete(existing) except: LOG.exception('Exception deleting rule from %s pack.',",
"os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register pack first self.register_pack(pack_name=pack,",
"%s does not contain rules.', pack) continue try: LOG.debug('Registering rules",
"content_type='rules') for pack, rules_dir in six.iteritems(content): if not rules_dir: LOG.debug('Pack",
"Exception('Model is in pack \"%s\" but field \"pack\" is different:",
"without pack to rule with pack. # There might be",
"rules from pack: %s', rules_dir) return registered_count def register_from_pack(self, pack_dir):",
"new one.', rule) try: rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra =",
"to create rule %s.', rule_api.name) # If there was an",
"If there was an existing rule then the ref count",
"(rule, pack, six.text_type(e))) raise ValueError(msg) LOG.exception('Failed registering rule from %s.',",
"in compliance with the License. # You may obtain a",
"def _get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack, rules): registered_count",
"software # distributed under the License is distributed on an",
"RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS def register_from_packs(self, base_dirs): \"\"\" :return: Number",
"not rules_dir: return registered_count LOG.debug('Registering rules from pack %s:, dir:",
"registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir) else:",
"from %s.', rule_db, rule, extra=extra) except Exception: LOG.exception('Failed to create",
"from st2common import log as logging from st2common.constants.meta import ALLOWED_EXTS",
"_register_rules_from_pack(self, pack, rules): registered_count = 0 # TODO: Refactor this",
"a Trigger so now is a good time for book-keeping.",
"so we don't have duplicates. if pack_field != DEFAULT_PACK_NAME: try:",
"in pack %s', content['name'], DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing =",
"st2common.persistence.rule import Rule from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db",
"if pack_field != pack: raise Exception('Model is in pack \"%s\"",
"field \"pack\" is different: %s' % (pack, pack_field)) metadata_file =",
"existing = Rule.get_by_ref(rule_ref) if existing: rule_db.id = existing.id LOG.debug('Found existing",
"self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack, rules) registered_count += count except Exception",
"msg = ('Failed to register rule \"%s\" from pack \"%s\":",
"if pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir) else: result = registrar.register_from_packs(base_dirs=packs_base_paths) return",
"rule: %s with id: %s', rule_ref, existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule",
"good time for book-keeping. if existing: cleanup_trigger_db_for_rule(existing) except Exception as",
"rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule %s in pack",
"def register_from_pack(self, pack_dir): \"\"\" Register all the rules from the",
"DEFAULT_PACK_NAME: try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule %s",
"0 # TODO: Refactor this monstrosity for rule in rules:",
"all rules from pack: %s', rules_dir) return registered_count def register_from_pack(self,",
"from st2common.persistence.rule import Rule from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count from",
"Refactor this monstrosity for rule in rules: LOG.debug('Loading rule from",
"= Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db': rule_db} LOG.audit('Rule updated. Rule",
"LOG.debug('Loading rule from %s.', rule) try: content = self._meta_loader.load(rule) pack_field",
"# Migration from rule without pack to rule with pack.",
"%s.', rule_api.name) # If there was an existing rule then",
"self._fail_on_failure: raise e LOG.exception('Failed registering all rules from pack: %s',",
"extra=extra) except Exception: LOG.exception('Failed to create rule %s.', rule_api.name) #",
"rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing = Rule.get_by_ref(rule_ref) if existing: rule_db.id",
"to be adjusted down here. Also, update could # lead",
"\"\"\" Register all the rules from the provided pack. :return:",
"%s', pack) rules = self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack, rules) registered_count",
"self._register_rules_from_pack(pack, rules) registered_count += count except Exception as e: if",
"to rule with pack. # There might be a rule",
"registered. :rtype: ``int`` \"\"\" pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"rule in pack default: %s; Deleting.', rule_ref) Rule.delete(existing) except: LOG.exception('Exception",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"found. Creating new one.', rule) try: rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api)",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"to in writing, software # distributed under the License is",
"LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra) except",
"\"%s\" from pack \"%s\": %s' % (rule, pack, six.text_type(e))) raise",
"# See the License for the specific language governing permissions",
"pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir) else: result = registrar.register_from_packs(base_dirs=packs_base_paths) return result",
"or agreed to in writing, software # distributed under the",
"Migration from rule without pack to rule with pack. #",
"= self._meta_loader.load(rule) pack_field = content.get('pack', None) if not pack_field: content['pack']",
"required by applicable law or agreed to in writing, software",
"\"\"\" # Register packs first self.register_packs(base_dirs=base_dirs) registered_count = 0 content",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"# TODO: Refactor this monstrosity for rule in rules: LOG.debug('Loading",
"LOG = logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS def register_from_packs(self,",
"with the License. # You may obtain a copy of",
"pack pack_field = pack if pack_field != pack: raise Exception('Model",
"pack first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count = 0 if not rules_dir:",
"pack. # There might be a rule with same name",
"return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack, rules): registered_count = 0 #",
"ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing = Rule.get_by_ref(rule_ref) if existing: rule_db.id = existing.id",
"pack_dir=None, use_pack_cache=True, fail_on_failure=False): if packs_base_paths: assert isinstance(packs_base_paths, list) if not",
"metadata_file rule_api = RuleAPI(**content) rule_api.validate() rule_db = RuleAPI.to_model(rule_api) # Migration",
"Rule.get_by_ref(rule_ref) if existing: rule_db.id = existing.id LOG.debug('Found existing rule: %s",
"a good time for book-keeping. if existing: cleanup_trigger_db_for_rule(existing) except Exception",
"was updated in # to_model so it needs to be",
"from rule without pack to rule with pack. # There",
"pack if pack_field != pack: raise Exception('Model is in pack",
"compliance with the License. # You may obtain a copy",
"RuleAPI from st2common.models.system.common import ResourceReference from st2common.persistence.rule import Rule from",
"agreed to in writing, software # distributed under the License",
"pack %s', content['name'], DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s',",
"registered. :rtype: ``int`` \"\"\" # Register packs first self.register_packs(base_dirs=base_dirs) registered_count",
"# limitations under the License. from __future__ import absolute_import import",
"rule from %s pack.', DEFAULT_PACK_NAME) try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack'])",
"distributed under the License is distributed on an \"AS IS\"",
"self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack, rules): registered_count = 0 # TODO:",
"we want to # delete so we don't have duplicates.",
"# Register packs first self.register_packs(base_dirs=base_dirs) registered_count = 0 content =",
"different: %s' % (pack, pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True)",
"pack: %s', rules_dir) return registered_count def register_from_pack(self, pack_dir): \"\"\" Register",
"express or implied. # See the License for the specific",
"except in compliance with the License. # You may obtain",
"delete so we don't have duplicates. if pack_field != DEFAULT_PACK_NAME:",
"from pack \"%s\": %s' % (rule, pack, six.text_type(e))) raise ValueError(msg)",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"not use this file except in compliance with the License.",
"assert isinstance(packs_base_paths, list) if not packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths() registrar",
"writing, software # distributed under the License is distributed on",
"+= count except Exception as e: if self._fail_on_failure: raise e",
"you may not use this file except in compliance with",
"= self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack, rules_dir in six.iteritems(content): if not",
"pack: raise Exception('Model is in pack \"%s\" but field \"pack\"",
"pack to rule with pack. # There might be a",
"rule with same name but in pack `default` # generated",
"is a good time for book-keeping. if existing: cleanup_trigger_db_for_rule(existing) except",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"License. from __future__ import absolute_import import os import six from",
"of a Trigger so now is a good time for",
"for book-keeping. if existing: cleanup_trigger_db_for_rule(existing) except Exception as e: if",
"ALLOWED_EXTS def register_from_packs(self, base_dirs): \"\"\" :return: Number of rules registered.",
"the ref count was updated in # to_model so it",
"= %s', existing) if existing: LOG.debug('Found rule in pack default:",
"0 if not rules_dir: return registered_count LOG.debug('Registering rules from pack",
"except Exception as e: if self._fail_on_failure: raise e LOG.exception('Failed registering",
"rules_dir: LOG.debug('Pack %s does not contain rules.', pack) continue try:",
"LOG.debug('Pack %s does not contain rules.', pack) continue try: LOG.debug('Registering",
"content['pack'] = pack pack_field = pack if pack_field != pack:",
"st2common.exceptions.db import coditationDBObjectNotFoundError import st2common.content.utils as content_utils __all__ = [",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"from st2common.constants.pack import DEFAULT_PACK_NAME from st2common.bootstrap.base import ResourceRegistrar from st2common.models.api.rule",
"is in pack \"%s\" but field \"pack\" is different: %s'",
"from %s pack.', DEFAULT_PACK_NAME) try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing",
"in # to_model so it needs to be adjusted down",
"%s', existing) if existing: LOG.debug('Found rule in pack default: %s;",
"from %s.', rule) try: content = self._meta_loader.load(rule) pack_field = content.get('pack',",
"!= DEFAULT_PACK_NAME: try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule",
"not packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if",
"rules registered. :rtype: ``int`` \"\"\" # Register packs first self.register_packs(base_dirs=base_dirs)",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
":rtype: ``int`` \"\"\" # Register packs first self.register_packs(base_dirs=base_dirs) registered_count =",
"so it needs to be adjusted down here. Also, update",
"LOG.exception('Failed registering rule from %s.', rule) else: registered_count += 1",
"rules.', pack) continue try: LOG.debug('Registering rules from pack: %s', pack)",
"return registered_count def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False): if packs_base_paths: assert",
"dir: %s', pack, rules_dir) try: rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count =",
"rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack, rules=rules) except Exception as",
"rule %s in pack %s', content['name'], DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref)",
"import coditationDBObjectNotFoundError import st2common.content.utils as content_utils __all__ = [ 'RulesRegistrar',",
"from st2common.exceptions.db import coditationDBObjectNotFoundError import st2common.content.utils as content_utils __all__ =",
"might be a rule with same name but in pack",
"fail_on_failure=False): if packs_base_paths: assert isinstance(packs_base_paths, list) if not packs_base_paths: packs_base_paths",
"import log as logging from st2common.constants.meta import ALLOWED_EXTS from st2common.constants.pack",
"``int`` \"\"\" pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir _,",
"case, we want to # delete so we don't have",
"= self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register pack first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count",
"from st2common.bootstrap.base import ResourceRegistrar from st2common.models.api.rule import RuleAPI from st2common.models.system.common",
"%s with id: %s', rule_ref, existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule %s",
"rules) registered_count += count except Exception as e: if self._fail_on_failure:",
"e: if self._fail_on_failure: raise e LOG.exception('Failed registering all rules from",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"%s', rules_dir) return registered_count def register_from_pack(self, pack_dir): \"\"\" Register all",
"Also, update could # lead to removal of a Trigger",
"Rule.delete(existing) except: LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME) try:",
"registered_count = 0 # TODO: Refactor this monstrosity for rule",
"the License is distributed on an \"AS IS\" BASIS, #",
"from %s.', rule) else: registered_count += 1 return registered_count def",
"try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule %s in",
"register_from_pack(self, pack_dir): \"\"\" Register all the rules from the provided",
"not rules_dir: LOG.debug('Pack %s does not contain rules.', pack) continue",
"rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db': rule_db} LOG.audit('Rule updated.",
"registered_count = 0 if not rules_dir: return registered_count LOG.debug('Registering rules",
"try: LOG.debug('Registering rules from pack: %s', pack) rules = self._get_rules_from_pack(rules_dir)",
"self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count = 0 if not rules_dir: return registered_count",
"not pack_field: content['pack'] = pack pack_field = pack if pack_field",
"be a rule with same name but in pack `default`",
"LOG.exception('Failed registering all rules from pack: %s', rules_dir) return registered_count",
"RuleAPI.to_model(rule_api) # Migration from rule without pack to rule with",
"rule then the ref count was updated in # to_model",
"if not pack_field: content['pack'] = pack pack_field = pack if",
"import os import six from st2common import log as logging",
"but in pack `default` # generated in migration script. In",
"registered_count def register_from_pack(self, pack_dir): \"\"\" Register all the rules from",
"one.', rule) try: rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db':",
"here. Also, update could # lead to removal of a",
"to register rule \"%s\" from pack \"%s\": %s' % (rule,",
"packs_base_paths: assert isinstance(packs_base_paths, list) if not packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths()",
"ref count was updated in # to_model so it needs",
"existing: LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref) Rule.delete(existing)",
"law or agreed to in writing, software # distributed under",
"rules = self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack, rules) registered_count += count",
"rules_dir: return registered_count LOG.debug('Registering rules from pack %s:, dir: %s',",
"st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db import coditationDBObjectNotFoundError import st2common.content.utils",
"rules from pack: %s', rules_dir) return registered_count def _get_rules_from_pack(self, rules_dir):",
"as e: if self._fail_on_failure: msg = ('Failed to register rule",
"packs first self.register_packs(base_dirs=base_dirs) registered_count = 0 content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules')",
"# Register pack first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count = 0 if",
"but field \"pack\" is different: %s' % (pack, pack_field)) metadata_file",
"generated in migration script. In this case, we want to",
"Trigger so now is a good time for book-keeping. if",
"rules=rules) except Exception as e: if self._fail_on_failure: raise e LOG.exception('Failed",
"rule from %s.', rule) else: registered_count += 1 return registered_count",
"= ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule %s in pack %s',",
"existing.id LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id)",
"all the rules from the provided pack. :return: Number of",
"st2common.content.utils as content_utils __all__ = [ 'RulesRegistrar', 'register_rules' ] LOG",
"DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s', existing) if existing:",
"Number of rules registered. :rtype: ``int`` \"\"\" pack_dir = pack_dir[:-1]",
"rules from pack: %s', pack) rules = self._get_rules_from_pack(rules_dir) count =",
"fail_on_failure=fail_on_failure) if pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir) else: result = registrar.register_from_packs(base_dirs=packs_base_paths)",
"pack: %s', pack) rules = self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack, rules)",
"= existing.id LOG.debug('Found existing rule: %s with id: %s', rule_ref,",
"count was updated in # to_model so it needs to",
"six.text_type(e))) raise ValueError(msg) LOG.exception('Failed registering rule from %s.', rule) else:",
"try: content = self._meta_loader.load(rule) pack_field = content.get('pack', None) if not",
"may obtain a copy of the License at # #",
"we don't have duplicates. if pack_field != DEFAULT_PACK_NAME: try: rule_ref",
"rule) try: content = self._meta_loader.load(rule) pack_field = content.get('pack', None) if",
"# generated in migration script. In this case, we want",
"if self._fail_on_failure: msg = ('Failed to register rule \"%s\" from",
"= ALLOWED_EXTS def register_from_packs(self, base_dirs): \"\"\" :return: Number of rules",
"# Copyright 2019 Extreme Networks, Inc. # # Licensed under",
"st2common.constants.meta import ALLOWED_EXTS from st2common.constants.pack import DEFAULT_PACK_NAME from st2common.bootstrap.base import",
"and # limitations under the License. from __future__ import absolute_import",
"rule_api.validate() rule_db = RuleAPI.to_model(rule_api) # Migration from rule without pack",
"with same name but in pack `default` # generated in",
"0 content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack, rules_dir in six.iteritems(content):",
"(pack, pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file'] = metadata_file",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"Inc. # # Licensed under the Apache License, Version 2.0",
"else pack_dir _, pack = os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules')",
"= content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file'] = metadata_file rule_api = RuleAPI(**content)",
"# There might be a rule with same name but",
"= Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s', existing) if existing: LOG.debug('Found rule",
"coditationDBObjectNotFoundError import st2common.content.utils as content_utils __all__ = [ 'RulesRegistrar', 'register_rules'",
"existing rule: %s with id: %s', rule_ref, existing.id) except coditationDBObjectNotFoundError:",
"if not packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure)",
"if existing: rule_db.id = existing.id LOG.debug('Found existing rule: %s with",
"LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME) try: rule_ref =",
"may not use this file except in compliance with the",
"2019 Extreme Networks, Inc. # # Licensed under the Apache",
"{'rule_db': rule_db} LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule,",
"def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False): if packs_base_paths: assert isinstance(packs_base_paths, list)",
"content['metadata_file'] = metadata_file rule_api = RuleAPI(**content) rule_api.validate() rule_db = RuleAPI.to_model(rule_api)",
"existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule %s not found. Creating new one.',",
"cleanup_trigger_db_for_rule(existing) except Exception as e: if self._fail_on_failure: msg = ('Failed",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"None) if not pack_field: content['pack'] = pack pack_field = pack",
"this file except in compliance with the License. # You",
"rules): registered_count = 0 # TODO: Refactor this monstrosity for",
"existing: cleanup_trigger_db_for_rule(existing) except Exception as e: if self._fail_on_failure: msg =",
"be adjusted down here. Also, update could # lead to",
"cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db import coditationDBObjectNotFoundError import st2common.content.utils as content_utils",
"e LOG.exception('Failed registering all rules from pack: %s', rules_dir) return",
"needs to be adjusted down here. Also, update could #",
"if not rules_dir: LOG.debug('Pack %s does not contain rules.', pack)",
"%s.', rule) try: content = self._meta_loader.load(rule) pack_field = content.get('pack', None)",
"increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db': rule_db} LOG.audit('Rule updated. Rule %s from",
"ALLOWED_EXTS from st2common.constants.pack import DEFAULT_PACK_NAME from st2common.bootstrap.base import ResourceRegistrar from",
"rules from the provided pack. :return: Number of rules registered.",
"rule_db.id = existing.id LOG.debug('Found existing rule: %s with id: %s',",
"content.get('pack', None) if not pack_field: content['pack'] = pack pack_field =",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"= 0 # TODO: Refactor this monstrosity for rule in",
"content = self._meta_loader.load(rule) pack_field = content.get('pack', None) if not pack_field:",
"else: registered_count += 1 return registered_count def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True,",
"name but in pack `default` # generated in migration script.",
"# # Licensed under the Apache License, Version 2.0 (the",
":return: Number of rules registered. :rtype: ``int`` \"\"\" # Register",
"Exception: LOG.exception('Failed to create rule %s.', rule_api.name) # If there",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"does not contain rules.', pack) continue try: LOG.debug('Registering rules from",
"isinstance(packs_base_paths, list) if not packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths() registrar =",
"rule_db = RuleAPI.to_model(rule_api) # Migration from rule without pack to",
"coditationDBObjectNotFoundError: LOG.debug('Rule %s not found. Creating new one.', rule) try:",
"registering rule from %s.', rule) else: registered_count += 1 return",
"if existing: LOG.debug('Found rule in pack default: %s; Deleting.', rule_ref)",
"if existing: cleanup_trigger_db_for_rule(existing) except Exception as e: if self._fail_on_failure: msg",
"rules_dir in six.iteritems(content): if not rules_dir: LOG.debug('Pack %s does not",
"st2common.models.system.common import ResourceReference from st2common.persistence.rule import Rule from st2common.services.triggers import",
"Rule from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db import coditationDBObjectNotFoundError",
"rule_ref, existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule %s not found. Creating new",
"if packs_base_paths: assert isinstance(packs_base_paths, list) if not packs_base_paths: packs_base_paths =",
"] LOG = logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS def",
"for rule in rules: LOG.debug('Loading rule from %s.', rule) try:",
"pack_dir _, pack = os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') #",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"self.register_packs(base_dirs=base_dirs) registered_count = 0 content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack,",
"could # lead to removal of a Trigger so now",
"book-keeping. if existing: cleanup_trigger_db_for_rule(existing) except Exception as e: if self._fail_on_failure:",
"import DEFAULT_PACK_NAME from st2common.bootstrap.base import ResourceRegistrar from st2common.models.api.rule import RuleAPI",
"logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS def register_from_packs(self, base_dirs): \"\"\"",
"LOG.debug('Found existing rule: %s with id: %s', rule_ref, existing.id) except",
"in rules: LOG.debug('Loading rule from %s.', rule) try: content =",
"rule %s.', rule_api.name) # If there was an existing rule",
"LOG.debug('Registering rules from pack: %s', pack) rules = self._get_rules_from_pack(rules_dir) count",
"self._fail_on_failure: msg = ('Failed to register rule \"%s\" from pack",
"LOG.debug('Rule %s not found. Creating new one.', rule) try: rule_db",
"pack) rules = self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack, rules) registered_count +=",
"rule) else: registered_count += 1 return registered_count def register_rules(packs_base_paths=None, pack_dir=None,",
"pack_dir): \"\"\" Register all the rules from the provided pack.",
"# to_model so it needs to be adjusted down here.",
"all rules from pack: %s', rules_dir) return registered_count def _get_rules_from_pack(self,",
"adjusted down here. Also, update could # lead to removal",
"self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack, rules_dir in six.iteritems(content): if not rules_dir:",
"except Exception: LOG.exception('Failed to create rule %s.', rule_api.name) # If",
"registered_count = 0 content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack, rules_dir",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"an existing rule then the ref count was updated in",
"to # delete so we don't have duplicates. if pack_field",
"1 return registered_count def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False): if packs_base_paths:",
"self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack, rules=rules) except Exception as e: if",
"%s' % (rule, pack, six.text_type(e))) raise ValueError(msg) LOG.exception('Failed registering rule",
"Exception as e: if self._fail_on_failure: msg = ('Failed to register",
"rule) try: rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db': rule_db}",
"was an existing rule then the ref count was updated",
"don't have duplicates. if pack_field != DEFAULT_PACK_NAME: try: rule_ref =",
"or implied. # See the License for the specific language",
"governing permissions and # limitations under the License. from __future__",
"content_type='rules') # Register pack first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count = 0",
"DEFAULT_PACK_NAME) try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing = Rule.get_by_ref(rule_ref) if",
"%s:, dir: %s', pack, rules_dir) try: rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count",
"provided pack. :return: Number of rules registered. :rtype: ``int`` \"\"\"",
"pack \"%s\" but field \"pack\" is different: %s' % (pack,",
"= metadata_file rule_api = RuleAPI(**content) rule_api.validate() rule_db = RuleAPI.to_model(rule_api) #",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"= content.get('pack', None) if not pack_field: content['pack'] = pack pack_field",
":rtype: ``int`` \"\"\" pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir",
"= Rule.get_by_ref(rule_ref) if existing: rule_db.id = existing.id LOG.debug('Found existing rule:",
"try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing = Rule.get_by_ref(rule_ref) if existing:",
"RuleAPI(**content) rule_api.validate() rule_db = RuleAPI.to_model(rule_api) # Migration from rule without",
"pack, six.text_type(e))) raise ValueError(msg) LOG.exception('Failed registering rule from %s.', rule)",
"rules: LOG.debug('Loading rule from %s.', rule) try: content = self._meta_loader.load(rule)",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"continue try: LOG.debug('Registering rules from pack: %s', pack) rules =",
"registered_count def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False): if packs_base_paths: assert isinstance(packs_base_paths,",
"duplicates. if pack_field != DEFAULT_PACK_NAME: try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME)",
"LOG.debug('Existing = %s', existing) if existing: LOG.debug('Found rule in pack",
"In this case, we want to # delete so we",
"There might be a rule with same name but in",
"from st2common.models.system.common import ResourceReference from st2common.persistence.rule import Rule from st2common.services.triggers",
"limitations under the License. from __future__ import absolute_import import os",
"# delete so we don't have duplicates. if pack_field !=",
"(the \"License\"); # you may not use this file except",
"if pack_dir.endswith('/') else pack_dir _, pack = os.path.split(pack_dir) rules_dir =",
"# you may not use this file except in compliance",
"Register all the rules from the provided pack. :return: Number",
"not found. Creating new one.', rule) try: rule_db = Rule.add_or_update(rule_db)",
"script. In this case, we want to # delete so",
"of rules registered. :rtype: ``int`` \"\"\" pack_dir = pack_dir[:-1] if",
"Extreme Networks, Inc. # # Licensed under the Apache License,",
"ValueError(msg) LOG.exception('Failed registering rule from %s.', rule) else: registered_count +=",
"Rule %s from %s.', rule_db, rule, extra=extra) except Exception: LOG.exception('Failed",
"%s; Deleting.', rule_ref) Rule.delete(existing) except: LOG.exception('Exception deleting rule from %s",
"rule in rules: LOG.debug('Loading rule from %s.', rule) try: content",
"= self._register_rules_from_pack(pack=pack, rules=rules) except Exception as e: if self._fail_on_failure: raise",
"Creating new one.', rule) try: rule_db = Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra",
"permissions and # limitations under the License. from __future__ import",
"Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s', existing) if existing: LOG.debug('Found rule in",
"packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir:",
"this monstrosity for rule in rules: LOG.debug('Loading rule from %s.',",
"self._meta_loader.load(rule) pack_field = content.get('pack', None) if not pack_field: content['pack'] =",
"existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s', existing) if existing: LOG.debug('Found",
"it needs to be adjusted down here. Also, update could",
"= RuleAPI(**content) rule_api.validate() rule_db = RuleAPI.to_model(rule_api) # Migration from rule",
"# # Unless required by applicable law or agreed to",
"if not rules_dir: return registered_count LOG.debug('Registering rules from pack %s:,",
"rules_dir) return registered_count def _get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self,",
"from pack %s:, dir: %s', pack, rules_dir) try: rules =",
"%s', content['name'], DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s', existing)",
"%s pack.', DEFAULT_PACK_NAME) try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing =",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"pack.', DEFAULT_PACK_NAME) try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=content['pack']) existing = Rule.get_by_ref(rule_ref)",
"= {'rule_db': rule_db} LOG.audit('Rule updated. Rule %s from %s.', rule_db,",
"Version 2.0 (the \"License\"); # you may not use this",
":return: Number of rules registered. :rtype: ``int`` \"\"\" pack_dir =",
"content['name'], DEFAULT_PACK_NAME) existing = Rule.get_by_ref(rule_ref) LOG.debug('Existing = %s', existing) if",
"rule, extra=extra) except Exception: LOG.exception('Failed to create rule %s.', rule_api.name)",
"in migration script. In this case, we want to #",
"return registered_count def _get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack,",
"_get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack, rules): registered_count =",
"except coditationDBObjectNotFoundError: LOG.debug('Rule %s not found. Creating new one.', rule)",
"pack=DEFAULT_PACK_NAME) LOG.debug('Looking for rule %s in pack %s', content['name'], DEFAULT_PACK_NAME)",
"log as logging from st2common.constants.meta import ALLOWED_EXTS from st2common.constants.pack import",
"\"%s\": %s' % (rule, pack, six.text_type(e))) raise ValueError(msg) LOG.exception('Failed registering",
"content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file'] = metadata_file rule_api = RuleAPI(**content) rule_api.validate()",
"rule without pack to rule with pack. # There might",
"implied. # See the License for the specific language governing",
"Register packs first self.register_packs(base_dirs=base_dirs) registered_count = 0 content = self._pack_loader.get_content(base_dirs=base_dirs,",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS def register_from_packs(self, base_dirs): \"\"\" :return:",
"except Exception as e: if self._fail_on_failure: msg = ('Failed to",
"self._register_rules_from_pack(pack=pack, rules=rules) except Exception as e: if self._fail_on_failure: raise e",
"by applicable law or agreed to in writing, software #",
"def _register_rules_from_pack(self, pack, rules): registered_count = 0 # TODO: Refactor",
"rule from %s.', rule) try: content = self._meta_loader.load(rule) pack_field =",
"%s', pack, rules_dir) try: rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack,",
"= os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register pack first",
"rule with pack. # There might be a rule with",
"from __future__ import absolute_import import os import six from st2common",
"create rule %s.', rule_api.name) # If there was an existing",
"content = self._pack_loader.get_content(base_dirs=base_dirs, content_type='rules') for pack, rules_dir in six.iteritems(content): if",
"pack default: %s; Deleting.', rule_ref) Rule.delete(existing) except: LOG.exception('Exception deleting rule",
"raise e LOG.exception('Failed registering all rules from pack: %s', rules_dir)",
"migration script. In this case, we want to # delete",
"six from st2common import log as logging from st2common.constants.meta import",
"register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False): if packs_base_paths: assert isinstance(packs_base_paths, list) if",
"pack. :return: Number of rules registered. :rtype: ``int`` \"\"\" pack_dir",
"pack_field = pack if pack_field != pack: raise Exception('Model is",
"__future__ import absolute_import import os import six from st2common import",
"self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register pack first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count =",
"'RulesRegistrar', 'register_rules' ] LOG = logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS =",
"ResourceRegistrar from st2common.models.api.rule import RuleAPI from st2common.models.system.common import ResourceReference from",
"is different: %s' % (pack, pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule,",
"pack_field = content.get('pack', None) if not pack_field: content['pack'] = pack",
"in six.iteritems(content): if not rules_dir: LOG.debug('Pack %s does not contain",
"%s' % (pack, pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file']",
"%s.', rule_db, rule, extra=extra) except Exception: LOG.exception('Failed to create rule",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"import st2common.content.utils as content_utils __all__ = [ 'RulesRegistrar', 'register_rules' ]",
"rule_ref) Rule.delete(existing) except: LOG.exception('Exception deleting rule from %s pack.', DEFAULT_PACK_NAME)",
"Unless required by applicable law or agreed to in writing,",
"id: %s', rule_ref, existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule %s not found.",
"LOG.exception('Failed to create rule %s.', rule_api.name) # If there was",
"registered_count += 1 return registered_count def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False):",
"TODO: Refactor this monstrosity for rule in rules: LOG.debug('Loading rule",
"from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db import coditationDBObjectNotFoundError import",
"as e: if self._fail_on_failure: raise e LOG.exception('Failed registering all rules",
"from pack: %s', rules_dir) return registered_count def _get_rules_from_pack(self, rules_dir): return",
"update could # lead to removal of a Trigger so",
"file_path=rule, use_pack_cache=True) content['metadata_file'] = metadata_file rule_api = RuleAPI(**content) rule_api.validate() rule_db",
"%s', rule_ref, existing.id) except coditationDBObjectNotFoundError: LOG.debug('Rule %s not found. Creating",
"registered_count = self._register_rules_from_pack(pack=pack, rules=rules) except Exception as e: if self._fail_on_failure:",
"import RuleAPI from st2common.models.system.common import ResourceReference from st2common.persistence.rule import Rule",
"from pack: %s', pack) rules = self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack,",
"now is a good time for book-keeping. if existing: cleanup_trigger_db_for_rule(existing)",
"the specific language governing permissions and # limitations under the",
"to removal of a Trigger so now is a good",
"list) if not packs_base_paths: packs_base_paths = content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache,",
"rule_api.name) # If there was an existing rule then the",
"applicable law or agreed to in writing, software # distributed",
"six.iteritems(content): if not rules_dir: LOG.debug('Pack %s does not contain rules.',",
"then the ref count was updated in # to_model so",
"in writing, software # distributed under the License is distributed",
"# lead to removal of a Trigger so now is",
"= content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir: result =",
"in pack `default` # generated in migration script. In this",
"removal of a Trigger so now is a good time",
"first self.register_pack(pack_name=pack, pack_dir=pack_dir) registered_count = 0 if not rules_dir: return",
"pack = os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register pack",
"%s.', rule) else: registered_count += 1 return registered_count def register_rules(packs_base_paths=None,",
"\"\"\" pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir _, pack",
"existing rule then the ref count was updated in #",
"= pack if pack_field != pack: raise Exception('Model is in",
"contain rules.', pack) continue try: LOG.debug('Registering rules from pack: %s',",
"default: %s; Deleting.', rule_ref) Rule.delete(existing) except: LOG.exception('Exception deleting rule from",
"as logging from st2common.constants.meta import ALLOWED_EXTS from st2common.constants.pack import DEFAULT_PACK_NAME",
"with pack. # There might be a rule with same",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"content_utils __all__ = [ 'RulesRegistrar', 'register_rules' ] LOG = logging.getLogger(__name__)",
"pack, rules_dir) try: rules = self._get_rules_from_pack(rules_dir=rules_dir) registered_count = self._register_rules_from_pack(pack=pack, rules=rules)",
"logging from st2common.constants.meta import ALLOWED_EXTS from st2common.constants.pack import DEFAULT_PACK_NAME from",
"License, Version 2.0 (the \"License\"); # you may not use",
"# You may obtain a copy of the License at",
"\"%s\" but field \"pack\" is different: %s' % (pack, pack_field))",
"%s', rules_dir) return registered_count def _get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def",
"base_dirs): \"\"\" :return: Number of rules registered. :rtype: ``int`` \"\"\"",
"= 0 if not rules_dir: return registered_count LOG.debug('Registering rules from",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"pack) continue try: LOG.debug('Registering rules from pack: %s', pack) rules",
"%s not found. Creating new one.', rule) try: rule_db =",
"% (pack, pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file'] =",
"for rule %s in pack %s', content['name'], DEFAULT_PACK_NAME) existing =",
"import ResourceReference from st2common.persistence.rule import Rule from st2common.services.triggers import cleanup_trigger_db_for_rule,",
"updated. Rule %s from %s.', rule_db, rule, extra=extra) except Exception:",
"%s from %s.', rule_db, rule, extra=extra) except Exception: LOG.exception('Failed to",
"from the provided pack. :return: Number of rules registered. :rtype:",
"+= 1 return registered_count def register_rules(packs_base_paths=None, pack_dir=None, use_pack_cache=True, fail_on_failure=False): if",
"content_utils.get_packs_base_paths() registrar = RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir)",
"this case, we want to # delete so we don't",
"import Rule from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db import",
"have duplicates. if pack_field != DEFAULT_PACK_NAME: try: rule_ref = ResourceReference.to_string_reference(name=content['name'],",
"the License for the specific language governing permissions and #",
"if pack_field != DEFAULT_PACK_NAME: try: rule_ref = ResourceReference.to_string_reference(name=content['name'], pack=DEFAULT_PACK_NAME) LOG.debug('Looking",
"import ALLOWED_EXTS from st2common.constants.pack import DEFAULT_PACK_NAME from st2common.bootstrap.base import ResourceRegistrar",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"pack_dir[:-1] if pack_dir.endswith('/') else pack_dir _, pack = os.path.split(pack_dir) rules_dir",
"the provided pack. :return: Number of rules registered. :rtype: ``int``",
"either express or implied. # See the License for the",
"__all__ = [ 'RulesRegistrar', 'register_rules' ] LOG = logging.getLogger(__name__) class",
"in pack default: %s; Deleting.', rule_ref) Rule.delete(existing) except: LOG.exception('Exception deleting",
"st2common.constants.pack import DEFAULT_PACK_NAME from st2common.bootstrap.base import ResourceRegistrar from st2common.models.api.rule import",
"in pack \"%s\" but field \"pack\" is different: %s' %",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"registering all rules from pack: %s', rules_dir) return registered_count def",
"of rules registered. :rtype: ``int`` \"\"\" # Register packs first",
"\"\"\" :return: Number of rules registered. :rtype: ``int`` \"\"\" #",
"lead to removal of a Trigger so now is a",
"extra = {'rule_db': rule_db} LOG.audit('Rule updated. Rule %s from %s.',",
"rule \"%s\" from pack \"%s\": %s' % (rule, pack, six.text_type(e)))",
"RulesRegistrar(use_pack_cache=use_pack_cache, fail_on_failure=fail_on_failure) if pack_dir: result = registrar.register_from_pack(pack_dir=pack_dir) else: result =",
"Copyright 2019 Extreme Networks, Inc. # # Licensed under the",
"register_from_packs(self, base_dirs): \"\"\" :return: Number of rules registered. :rtype: ``int``",
"= self._register_rules_from_pack(pack, rules) registered_count += count except Exception as e:",
"import cleanup_trigger_db_for_rule, increment_trigger_ref_count from st2common.exceptions.db import coditationDBObjectNotFoundError import st2common.content.utils as",
"= ('Failed to register rule \"%s\" from pack \"%s\": %s'",
"count except Exception as e: if self._fail_on_failure: raise e LOG.exception('Failed",
"_, pack = os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register",
"the License. from __future__ import absolute_import import os import six",
"pack, rules): registered_count = 0 # TODO: Refactor this monstrosity",
"want to # delete so we don't have duplicates. if",
"metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack, file_path=rule, use_pack_cache=True) content['metadata_file'] = metadata_file rule_api =",
"Rule.add_or_update(rule_db) increment_trigger_ref_count(rule_api=rule_api) extra = {'rule_db': rule_db} LOG.audit('Rule updated. Rule %s",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"registered_count def _get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack, rules):",
"pack_dir.endswith('/') else pack_dir _, pack = os.path.split(pack_dir) rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir,",
"st2common import log as logging from st2common.constants.meta import ALLOWED_EXTS from",
"raise ValueError(msg) LOG.exception('Failed registering rule from %s.', rule) else: registered_count",
"time for book-keeping. if existing: cleanup_trigger_db_for_rule(existing) except Exception as e:",
"as content_utils __all__ = [ 'RulesRegistrar', 'register_rules' ] LOG =",
"rules from pack %s:, dir: %s', pack, rules_dir) try: rules",
"rules_dir) return registered_count def register_from_pack(self, pack_dir): \"\"\" Register all the",
"under the License. from __future__ import absolute_import import os import",
"pack_field != pack: raise Exception('Model is in pack \"%s\" but",
"rule_api = RuleAPI(**content) rule_api.validate() rule_db = RuleAPI.to_model(rule_api) # Migration from",
"<gh_stars>0 # Copyright 2019 Extreme Networks, Inc. # # Licensed",
"st2common.models.api.rule import RuleAPI from st2common.models.system.common import ResourceReference from st2common.persistence.rule import",
"\"License\"); # you may not use this file except in",
"so now is a good time for book-keeping. if existing:",
"ResourceReference from st2common.persistence.rule import Rule from st2common.services.triggers import cleanup_trigger_db_for_rule, increment_trigger_ref_count",
"a rule with same name but in pack `default` #",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"from st2common.constants.meta import ALLOWED_EXTS from st2common.constants.pack import DEFAULT_PACK_NAME from st2common.bootstrap.base",
"ALLOWED_EXTENSIONS = ALLOWED_EXTS def register_from_packs(self, base_dirs): \"\"\" :return: Number of",
"import absolute_import import os import six from st2common import log",
"rules registered. :rtype: ``int`` \"\"\" pack_dir = pack_dir[:-1] if pack_dir.endswith('/')",
"# distributed under the License is distributed on an \"AS",
"registered_count += count except Exception as e: if self._fail_on_failure: raise",
"# Unless required by applicable law or agreed to in",
"use_pack_cache=True, fail_on_failure=False): if packs_base_paths: assert isinstance(packs_base_paths, list) if not packs_base_paths:",
"absolute_import import os import six from st2common import log as",
"\"pack\" is different: %s' % (pack, pack_field)) metadata_file = content_utils.get_relative_path_to_pack_file(pack_ref=pack,",
"Networks, Inc. # # Licensed under the Apache License, Version",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"import six from st2common import log as logging from st2common.constants.meta",
"pack \"%s\": %s' % (rule, pack, six.text_type(e))) raise ValueError(msg) LOG.exception('Failed",
"rules_dir = self._pack_loader.get_content_from_pack(pack_dir=pack_dir, content_type='rules') # Register pack first self.register_pack(pack_name=pack, pack_dir=pack_dir)",
"You may obtain a copy of the License at #",
"import ResourceRegistrar from st2common.models.api.rule import RuleAPI from st2common.models.system.common import ResourceReference",
"Exception as e: if self._fail_on_failure: raise e LOG.exception('Failed registering all",
"to_model so it needs to be adjusted down here. Also,",
"register rule \"%s\" from pack \"%s\": %s' % (rule, pack,",
"deleting rule from %s pack.', DEFAULT_PACK_NAME) try: rule_ref = ResourceReference.to_string_reference(name=content['name'],",
"rule_db} LOG.audit('Rule updated. Rule %s from %s.', rule_db, rule, extra=extra)",
"if self._fail_on_failure: raise e LOG.exception('Failed registering all rules from pack:",
"raise Exception('Model is in pack \"%s\" but field \"pack\" is",
"use_pack_cache=True) content['metadata_file'] = metadata_file rule_api = RuleAPI(**content) rule_api.validate() rule_db =",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"increment_trigger_ref_count from st2common.exceptions.db import coditationDBObjectNotFoundError import st2common.content.utils as content_utils __all__",
"= logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS def register_from_packs(self, base_dirs):",
"'register_rules' ] LOG = logging.getLogger(__name__) class RulesRegistrar(ResourceRegistrar): ALLOWED_EXTENSIONS = ALLOWED_EXTS",
"= self._get_rules_from_pack(rules_dir) count = self._register_rules_from_pack(pack, rules) registered_count += count except",
"rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir) def _register_rules_from_pack(self, pack, rules): registered_count = 0",
"pack: %s', rules_dir) return registered_count def _get_rules_from_pack(self, rules_dir): return self.get_resources_from_pack(resources_dir=rules_dir)"
] |
[
"actual usage, forecast represents forecasted data and UsageAndForecast represents both",
"tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression",
"super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model):",
"filter expression to be used in the report. :param and_property:",
"filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of data present in the",
"list of operations and a URL link to get the",
"\"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type operator: str or",
"__init__( self, *, destination: \"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination",
"include: \"ActualCost\", \"AmortizedCost\", \"AHUB\". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param",
"= None, dimension: Optional[\"QueryComparisonExpression\"] = None, tag: Optional[\"QueryComparisonExpression\"] = None,",
"be populated in order to send to Azure. :param name:",
"\"\"\" _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, }",
":ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name: str :param display:",
"Optional[str] = None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type",
"self, *, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]] =",
"bool :param data: :type data: list[str] :ivar total: Total number",
"Cost Analysis. Variables are only populated by the server, and",
"pulling data for the report. If custom, then a specific",
"'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]]",
"float :param contact_emails: list of emails to contact. :type contact_emails:",
"The name of the column to group. :type name: str",
"'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails',",
"__init__( self, *, definition: Optional[\"AlertPropertiesDefinition\"] = None, description: Optional[str] =",
"include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\", \"General\", \"xCloud\", \"BudgetForecast\". :type type:",
"values include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe:",
"e_tag: str :param format: The format of the export being",
"query. If custom, then a specific time period must be",
":ivar category: Dimension category. :vartype category: str :ivar usage_start: Usage",
"class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of data present in the report.",
"values include: \"Cost\", \"Usage\", \"Billing\", \"System\". :type category: str or",
":type overriding_alert: str \"\"\" _attribute_map = { 'time_grain_type': {'key': 'timeGrainType',",
"'resourceId', 'type': 'str'}, 'container': {'key': 'container', 'type': 'str'}, 'root_folder_path': {'key':",
":param definition: Has the definition for the export. :type definition:",
"granularity of rows in the export. Currently only 'Daily' is",
"{'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter':",
"criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map = { 'type': {'key':",
"__init__( self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] =",
"to 'ActualCost' and is applicable to exports that do not",
"Optional[Union[str, \"ChartType\"]] = None, accumulated: Optional[Union[str, \"AccumulatedType\"]] = None, metric:",
"link (url) to the next page of results. :vartype next_link:",
"'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def",
"= { 'type': {'key': 'type', 'type': 'str'}, 'id': {'key': 'id',",
"**kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties): \"\"\"The",
"Possible values include: \"In\", \"Contains\". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType",
"should only be specified with timeFrame set to 'Custom'. The",
"'[ExportExecution]'}, } def __init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value",
"= None self.next_link = None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing",
"threshold percentage as a decimal which activated this alert. :type",
"by the server, and will be ignored when sending a",
"True}, 'message': {'readonly': True}, } _attribute_map = { 'code': {'key':",
"'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type':",
"to be included in the export. If not provided then",
"schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'delivery_info': {'required': True}, 'definition':",
"expression to use in the report. Report can have up",
"{ 'recurrence': {'required': True}, } _attribute_map = { 'status': {'key':",
"of data for the dimension. :vartype total: int :ivar category:",
"last known status of the export execution. Possible values include:",
"\"\"\"The definition of data present in the forecast. :param granularity:",
"rows. :type rows: list[list[object]] \"\"\" _validation = { 'id': {'readonly':",
"for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested,",
"details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param",
"{'key': 'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description':",
"request. All required parameters must be populated in order to",
"'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, } def",
"float :param operator: operator used to compare currentSpend with amount.",
"All required parameters must be populated in order to send",
"list[str] :ivar total: Total number of data for the dimension.",
"for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope:",
"the export execution finished. :type processing_end_time: ~datetime.datetime :param file_name: The",
"comparison. :type values: list[str] \"\"\" _validation = { 'name': {'required':",
"delivery_info: Required. Has delivery information for the export. :type delivery_info:",
"not. :type e_tag: str :param format: The format of the",
"list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url) to the next page",
"to Azure. :param type: Required. Has type of the column",
"definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent",
"of column names to be included in the report. Any",
"was created. :type creation_time: str :param close_time: dateTime in which",
":param resource_filter: array of resources to filter by. :type resource_filter:",
"= None self.type = None self.e_tag = e_tag class Export(ProxyResource):",
"values include: \"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param",
"*, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"ExportDatasetConfiguration\"] = None,",
"ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used in the report.",
":type e_tag: str \"\"\" _validation = { 'id': {'readonly': True},",
"\"AlertStatus\"]] = None, creation_time: Optional[str] = None, close_time: Optional[str] =",
"identifier for the entity that executed the export. For OnDemand",
"'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type':",
"from_property: Required. The start date for export data. :type from_property:",
"current_spend: float :param contact_emails: list of emails to contact. :type",
"{'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, }",
"resource_group_filter self.resource_filter = resource_filter self.meter_filter = meter_filter self.tag_filter = tag_filter",
"to: Required. The end date for export data. :type to:",
"include: \"Sum\". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation =",
"'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'},",
"'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider",
"'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *, type: Union[str,",
":type threshold: float :param operator: operator used to compare currentSpend",
"'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'},",
"} def __init__( self, *, and_property: Optional[List[\"ReportConfigFilter\"]] = None, or_property:",
"data: list[str] :ivar total: Total number of data for the",
"'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset',",
"alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description:",
"~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount. :type amount: float :param",
"sending a request. All required parameters must be populated in",
"_attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'}, } def",
"is regenerated. # -------------------------------------------------------------------------- import datetime from typing import Dict,",
"Optional[str] = None, type: Optional[str] = None, **kwargs ): super(QueryColumn,",
"Possible values include: \"Ascending\", \"Descending\". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection",
":ivar next_run_time_estimate: If the export has an active schedule, provides",
"Required. The name of the aggregation function to use. Possible",
"= None, format: Optional[Union[str, \"FormatType\"]] = None, delivery_info: Optional[\"ExportDeliveryInfo\"] =",
"Array of group by expression to use in the query.",
"= None, processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str] = None,",
"True}, 'resource': {'readonly': True}, 'operation': {'readonly': True}, } _attribute_map =",
"operation failed. :vartype message: str \"\"\" _validation = { 'code':",
"Optional[\"QueryDataset\"] = None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type = type",
"columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of a report config. All",
"3 sub-views in the Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties]",
"modified. :type status_modification_time: str \"\"\" _attribute_map = { 'definition': {'key':",
"} def __init__( self, *, name: str, operator: Union[str, \"OperatorType\"],",
"values include: \"Daily\", \"Weekly\", \"Monthly\", \"Annually\". :type recurrence: str or",
"end date of the recurrence. The start date must be",
"For scheduled executions it is 'System'. :type submitted_by: str :param",
"'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, }",
"by. :type resource_group_filter: list[object] :param resource_filter: array of resources to",
"\"\"\"Result of listing cost management operations. It contains a list",
"send to Azure. :param resource_id: Required. The resource id of",
"'type': 'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, } def __init__(",
"**kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name = name",
"'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *, type: Union[str, \"ReportType\"], timeframe:",
"alerts. Variables are only populated by the server, and will",
"destination for the export being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\"",
"pull data from. :type from_property: ~datetime.datetime :param to: Required. The",
"to group. Possible values include: \"Tag\", \"Dimension\". :type type: str",
"type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description.",
"https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters must be populated in order",
"= type self.id = id self.enabled = enabled class Operation(msrest.serialization.Model):",
"destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation = { 'destination': {'required': True}, }",
"= { 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map",
"\"\"\"Error response indicates that the service is not able to",
"None self.message = None class ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates that",
"'recurrence': {'required': True}, } _attribute_map = { 'status': {'key': 'status',",
"type of the report. Usage represents actual usage, forecast represents",
":type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map = { 'error': {'key': 'error',",
"represents forecasted data and UsageAndForecast represents both usage and forecasted",
":param status_modification_time: dateTime in which the alert status was last",
"this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation = {",
"self, *, type: Optional[Union[str, \"KpiType\"]] = None, id: Optional[str] =",
"{'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type':",
"name: str :param function: Required. The name of the aggregation",
"Analysis. Required. Possible values include: \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\".",
"\"Scheduled\". :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The last",
"recurrence. The start date must be in future. If present,",
"timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None, time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset:",
":param name: Data field to show in view. :type name:",
"\"\"\"The comparison expression to be used in the report. All",
"name: str, operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression,",
"to filter by. :type meter_filter: list[object] :param tag_filter: tags to",
"\"\"\"The details of the error. Variables are only populated by",
"a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation = { 'and_property':",
"**kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function = function",
"individual alert. Variables are only populated by the server, and",
"'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key':",
"is applicable to exports that do not yet provide data",
"list results if there are any. :vartype next_link: str \"\"\"",
"{'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter':",
"'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'},",
"function to use. Possible values include: \"Sum\". :type function: str",
"'name': {'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True}, }",
"\"GeneralThresholdError\". :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map = {",
"None, meter_filter: Optional[List[object]] = None, tag_filter: Optional[object] = None, threshold:",
"the export being delivered. Currently only 'Csv' is supported. Possible",
"in the query. :param columns: Array of column names to",
"at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The",
"'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def",
":vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation = { 'value': {'readonly': True},",
"Export(ProxyResource): \"\"\"An export resource. Variables are only populated by the",
"~datetime.datetime :param processing_start_time: The time when export was picked up",
"column. :type type: str \"\"\" _attribute_map = { 'name': {'key':",
"definition of data present in the report. :param granularity: The",
"{'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, }",
"modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The",
"definition of a query. All required parameters must be populated",
"str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for",
"in the report. Possible values include: \"Daily\", \"Monthly\". :type granularity:",
"not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"]",
"'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key':",
"self.from_property = from_property self.to = to class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule",
"* 429 TooManyRequests - Request is throttled. Retry after waiting",
"UI?. :type enabled: bool \"\"\" _attribute_map = { 'type': {'key':",
"export will include all available columns. The available columns can",
"id of the storage account where exports will be delivered.",
"= description self.source = source self.details = details self.cost_entity_id =",
"= None, operator: Optional[Union[str, \"AlertOperator\"]] = None, amount: Optional[float] =",
":type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for",
"pivots: Optional[List[\"PivotProperties\"]] = None, type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None, timeframe:",
"period_start_date: Optional[str] = None, triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]]",
"= name self.type = type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression",
"self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition",
"a request. :ivar value: The list of exports. :vartype value:",
"Optional[str] = None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name = name",
"of aggregation expression to use in the forecast. The key",
"Has time period for pulling data for the query. :type",
"None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs)",
"to group. :type name: str \"\"\" _validation = { 'type':",
"class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by expression to be used in",
"if FreshPartialCost will be included. :type include_fresh_partial_cost: bool \"\"\" _validation",
"self.value = None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload to update",
"None, configuration: Optional[\"ExportDatasetConfiguration\"] = None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity",
"type self.category = category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert",
"of dataset in the report. :param columns: Array of column",
"then a specific time period must be provided. Possible values",
"details of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map =",
"'str'}, 'function': {'key': 'function', 'type': 'str'}, } def __init__( self,",
"~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str :param status:",
"'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition',",
"{ 'value': {'key': 'value', 'type': '[Export]'}, } def __init__( self,",
"in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map = {",
"True}, } _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'},",
"'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key':",
"data in the export. :param granularity: The granularity of rows",
"'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope',",
"an estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime",
"next set of alerts results if there are any. :vartype",
"'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type':",
":vartype name: str :param display: The object that represents the",
"include: \"In\", \"Contains\". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values:",
"order to send to Azure. :param status: The status of",
"str] :param execution_type: The type of the export execution. Possible",
"dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation = { 'type': {'required': True}, 'timeframe':",
"chart: Chart type of the main view in Cost Analysis.",
"definition of a report config. All required parameters must be",
"self.source = source self.details = details self.cost_entity_id = cost_entity_id self.status",
"executed. :type processing_start_time: ~datetime.datetime :param processing_end_time: The time when the",
"timeframe: Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] =",
"_attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'container': {'key':",
"= timeframe self.time_period = time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model):",
"self, *, type: Optional[Union[str, \"PivotType\"]] = None, name: Optional[str] =",
"} def __init__( self, *, data: Optional[List[str]] = None, **kwargs",
"{'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on':",
":vartype type: str :param e_tag: eTag of the resource. To",
"Required. The time frame for pulling data for the query.",
"to be included in the query. Any valid query column",
"'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type':",
"name: str, function: Union[str, \"FunctionType\"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name",
"Optional[Union[str, \"PivotType\"]] = None, name: Optional[str] = None, **kwargs ):",
"the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history:",
"results if there are any. :vartype next_link: str \"\"\" _validation",
"the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map = { 'error':",
"self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name",
"recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and end",
"~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation = { 'recurrence': {'required': True}, } _attribute_map",
"str :param submitted_time: The time when export was queued to",
"str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name =",
"in order to send to Azure. :param from_property: Required. The",
"None self.display = display class OperationDisplay(msrest.serialization.Model): \"\"\"The object that represents",
"'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key':",
"'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category',",
"Optional[List[\"QueryFilter\"]] = None, not_property: Optional[\"QueryFilter\"] = None, dimension: Optional[\"QueryComparisonExpression\"] =",
"import datetime from typing import Dict, List, Optional, Union from",
"time. Possible values include: \"true\", \"false\". :type accumulated: str or",
"self.dataset = dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class",
"URL to get the next set of operation list results",
"include_actual_cost: bool :param include_fresh_partial_cost: a boolean determining if FreshPartialCost will",
"= None, filter: Optional[\"ReportConfigFilter\"] = None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs)",
"{'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly':",
"def __init__( self, *, e_tag: Optional[str] = None, format: Optional[Union[str,",
"pulling data for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset:",
"and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical \"OR\" expression. Must have",
"429 TooManyRequests - Request is throttled. Retry after waiting for",
"status_modification_time: str \"\"\" _attribute_map = { 'definition': {'key': 'properties.definition', 'type':",
"data in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation =",
":type columns: list[str] \"\"\" _attribute_map = { 'columns': {'key': 'columns',",
"direction: Direction of sort. Possible values include: \"Ascending\", \"Descending\". :type",
":param type_properties_query_type: The type of the report. Usage represents actual",
"= None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name",
"{'required': True, 'min_items': 1}, } _attribute_map = { 'name': {'key':",
"\"ExecutionType\"]] = None, status: Optional[Union[str, \"ExecutionStatus\"]] = None, submitted_by: Optional[str]",
"of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type",
"'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition',",
"): super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name = name class",
"'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag',",
"The status of the export's schedule. If 'Inactive', the export's",
"2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[QueryFilter]'},",
"status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type of",
"or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name of the column",
"timeframe: Required. The time frame for pulling data for the",
"report. :param columns: Array of column names to be included",
"} _attribute_map = { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info':",
"export execution. Possible values include: \"OnDemand\", \"Scheduled\". :type execution_type: str",
"in the project root for license information. # Code generated",
"to send to Azure. :param name: Required. The name of",
"aggregation expression to be used in the query. All required",
"period_start_date: str :param triggered_by: notificationId that triggered this alert. :type",
"True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly': True}, } _attribute_map =",
"\"\"\"The filter expression to be used in the export. :param",
"the alias for the aggregated column. Report can have up",
"str :ivar created_on: Date the user created this view. :vartype",
"str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map = { 'type': {'key': 'type',",
"'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self,",
"True}, 'modified_on': {'readonly': True}, } _attribute_map = { 'id': {'key':",
"= { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map",
"portal, it is done automatically, however API users need to",
"a forecast. All required parameters must be populated in order",
"{'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category':",
"= None, dataset: Optional[\"ReportConfigDataset\"] = None, **kwargs ): super(View, self).__init__(e_tag=e_tag,",
"str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time frame for",
"when displaying costs. Possible values include: \"ActualCost\", \"AmortizedCost\", \"AHUB\". :type",
"of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime \"\"\" _validation",
"UsageAndForecast represents both usage and forecasted data. Actual usage and",
"export. For OnDemand executions it is the user email. For",
"report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation = { 'type':",
"'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, } def __init__( self,",
"the time specified in the \"Retry-After\" header. :param error: The",
"definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param",
"header. :param error: The details of the error. :type error:",
"= None, submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime] = None,",
"data: :type data: list[str] :ivar total: Total number of data",
"to the next page of results. :type next_link: str :param",
"type of the column to group. Possible values include: \"Tag\",",
"This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,",
"time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"ForecastDataset\"] = None, include_actual_cost: Optional[bool]",
"class Dimension(Resource): \"\"\"Dimension. Variables are only populated by the server,",
"True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'},",
"filter expression to use in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter",
"aggregation self.grouping = grouping self.sorting = sorting self.filter = filter",
"\"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount:",
"dataset: Has definition for data in this forecast. :type dataset:",
":param run_settings: The export settings that were in effect for",
"*, columns: Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns",
"provider: Service provider: Microsoft.CostManagement. :vartype provider: str :ivar resource: Resource",
"= None, not_property: Optional[\"QueryFilter\"] = None, dimension: Optional[\"QueryComparisonExpression\"] = None,",
"None, format: Optional[Union[str, \"FormatType\"]] = None, delivery_info: Optional[\"ExportDeliveryInfo\"] = None,",
"): super(QueryColumn, self).__init__(**kwargs) self.name = name self.type = type class",
"None, overriding_alert: Optional[str] = None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type",
"A list of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation",
":param to: The end date of recurrence. :type to: ~datetime.datetime",
"Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"ForecastDataset\"] = None,",
"self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs)",
"set of tags. Resource tags. :vartype tags: dict[str, str] :ivar",
"type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name of",
"**kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe",
"Required. The name of the column to group. :type name:",
"values include: \"Sum\". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation",
"__init__( self, *, type: Optional[Union[str, \"AlertType\"]] = None, category: Optional[Union[str,",
"include: \"Daily\", \"Weekly\", \"Monthly\", \"Annually\". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType",
"for pulling data for the export. If custom, then a",
"Code generated by Microsoft (R) AutoRest Code Generator. # Changes",
":type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical \"OR\" expression. Must",
"tags. Resource tags. :vartype tags: dict[str, str] :ivar description: Dimension",
"{'key': 'properties.error', 'type': 'ErrorDetails'}, } def __init__( self, *, execution_type:",
"'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'},",
"scenario, this field will be used to determine whether the",
"Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None, grouping: Optional[List[\"ReportConfigGrouping\"]]",
"datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to =",
"'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension',",
"from_property self.to = to class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated with",
"self.id = None self.name = None self.type = None self.tags",
"None, amount: Optional[float] = None, unit: Optional[str] = None, current_spend:",
"{'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property':",
":param delivery_info: Required. Has delivery information for the export. :type",
"'type': 'ErrorDetails'}, } def __init__( self, *, error: Optional[\"ErrorDetails\"] =",
"period must be provided. Possible values include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\",",
"{'key': 'operator', 'type': 'str'}, 'amount': {'key': 'amount', 'type': 'float'}, 'unit':",
"{'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type':",
"Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns",
"): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message = None class",
"None, contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str] = None, **kwargs",
"\"ReportTimeframeType\"]] = None, time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDataset\"] =",
"file_name self.run_settings = run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result",
"'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self,",
"user email. For scheduled executions it is 'System'. :type submitted_by:",
"'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'},",
"from_property: ~datetime.datetime :param to: The end date of recurrence. :type",
"self.run_settings = run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of",
"must contain a 'type' and 'enabled' key. :param type: KPI",
"filter by. :type meter_filter: list[object] :param tag_filter: tags to filter",
"tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation = { 'and_property': {'min_items':",
"super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination class ExportExecution(Resource): \"\"\"An export execution.",
"~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation = { 'destination': {'required': True}, } _attribute_map",
"super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation =",
"\"GranularityType\"]] = None, configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]]",
"scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope,",
":ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link: The",
"The name of the exported file. :type file_name: str :param",
"): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model): \"\"\"The definition",
"then query includes all columns. :type columns: list[str] \"\"\" _attribute_map",
"msrest.serialization from ._cost_management_client_enums import * class Resource(msrest.serialization.Model): \"\"\"The Resource model",
"'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key':",
"of Cost Analysis. Variables are only populated by the server,",
"'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'},",
"The name of the directory where exports will be uploaded.",
"executed the export. For OnDemand executions it is the user",
"~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'delivery_info': {'required': True}, 'definition': {'required':",
"= filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the",
":ivar created_on: Date the user created this view. :vartype created_on:",
"export's schedule. If 'Inactive', the export's schedule is paused. Possible",
"~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items': 0},",
"definition: Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition",
":type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation = { 'recurrence': {'required': True},",
"Optional[Union[str, \"MetricType\"]] = None, kpis: Optional[List[\"KpiProperties\"]] = None, pivots: Optional[List[\"PivotProperties\"]]",
"'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'},",
"to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation",
"Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"ExportDatasetConfiguration\"] = None, **kwargs ):",
"Resource model definition. Variables are only populated by the server,",
"the user is updating the latest version or not. :type",
"\"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"]",
"of alert. :param type: type of alert. Possible values include:",
"'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def",
"Optional[List[List[object]]] = None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link",
"maximum date range is 3 months. All required parameters must",
"data: Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description =",
"'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def __init__( self, *,",
":type resource_id: str :param container: Required. The name of the",
"def __init__( self, *, columns: Optional[List[str]] = None, **kwargs ):",
"\"Credit\", \"Quota\", \"General\", \"xCloud\", \"BudgetForecast\". :type type: str or ~azure.mgmt.costmanagement.models.AlertType",
"each item in the dictionary is the alias for the",
"expression to use in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\"",
"'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def",
"results. :vartype next_link: str \"\"\" _validation = { 'value': {'readonly':",
":type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period",
"self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information for the",
"'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__( self, *,",
"type: The type of column. :type type: str \"\"\" _attribute_map",
"None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] = None, file_name:",
"It contains a list of operations and a URL link",
"function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name: The name of column.",
":type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time",
"*, name: str, function: Union[str, \"FunctionType\"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs)",
"a specific time period must be provided. Possible values include:",
"Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope,",
"= None self.grouping_enabled = None self.data = data self.total =",
"to be used in the report. :param and_property: The logical",
"export. Variables are only populated by the server, and will",
"of recurrence. :type to: ~datetime.datetime \"\"\" _validation = { 'from_property':",
"'type': 'ErrorDetails'}, } def __init__( self, *, execution_type: Optional[Union[str, \"ExecutionType\"]]",
"scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope,",
"timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for",
"\"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly': True},",
"= None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled",
"display class OperationDisplay(msrest.serialization.Model): \"\"\"The object that represents the operation. Variables",
"'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *,",
":param close_time: dateTime in which alert was closed. :type close_time:",
"query column name is allowed. If not provided, then query",
"for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for",
"'message': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code',",
"Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._cost_management_client_enums",
"recurrence. :type from_property: ~datetime.datetime :param to: The end date of",
"pulling data for the query. If custom, then a specific",
"= timeframe self.time_period = time_period self.dataset = dataset self.include_actual_cost =",
"executions it is 'System'. :type submitted_by: str :param submitted_time: The",
"self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property = not_property",
":param direction: Direction of sort. Possible values include: \"Ascending\", \"Descending\".",
"= None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class",
"'Usage' is equivalent to 'ActualCost' and is applicable to exports",
"to broadcast to. :type contact_groups: list[str] :param contact_roles: list of",
"are any. :vartype next_link: str \"\"\" _validation = { 'value':",
"where exports will be uploaded. :type container: str :param root_folder_path:",
":param threshold: notification threshold percentage as a decimal which activated",
"tags. :vartype tags: dict[str, str] :param execution_type: The type of",
"): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name = name class",
"'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type':",
"Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(ExportProperties,",
"in the query. Query can have up to 2 group",
"metric: Metric to use when displaying costs. Possible values include:",
"type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame for",
"rows in the forecast. Possible values include: \"Daily\". :type granularity:",
"export. If custom, then a specific time period must be",
"{'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type':",
"ExternalSubscription scope. :type scope: str :ivar created_on: Date the user",
"\"ReportType\"]] = None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None, time_period: Optional[\"ReportConfigTimePeriod\"]",
"in the report. Report can have up to 2 group",
"aggregated column. Report can have up to 2 aggregation clauses.",
"Optional[\"ExportSchedule\"] = None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format =",
"operator self.values = values class QueryDataset(msrest.serialization.Model): \"\"\"The definition of data",
"class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used in the",
"file. :type file_name: str :param run_settings: The export settings that",
"def __init__( self, *, name: str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] =",
"None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, grouping: Optional[List[\"QueryGrouping\"]] = None,",
"to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter:",
"def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None",
"or amortization for service reservations. Possible values include: \"Usage\", \"ActualCost\",",
"close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str]",
"class QueryGrouping(msrest.serialization.Model): \"\"\"The group by expression to be used in",
"str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated over time.",
"= None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type = type self.id",
"unit self.current_spend = current_spend self.contact_emails = contact_emails self.contact_groups = contact_groups",
"in view. Possible values include: \"Dimension\", \"TagKey\". :type type: str",
"Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"]",
"self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time self.close_time",
"amount: budget threshold amount. :type amount: float :param unit: unit",
"included. :type include_actual_cost: bool :param include_fresh_partial_cost: a boolean determining if",
"status: Optional[Union[str, \"AlertStatus\"]] = None, creation_time: Optional[str] = None, close_time:",
"str :ivar tags: A set of tags. Resource tags. :vartype",
"~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression for a dimension. :type",
"'eTag', 'type': 'str'}, } def __init__( self, *, e_tag: Optional[str]",
":param category: Alert category. Possible values include: \"Cost\", \"Usage\", \"Billing\",",
"True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[ExportExecution]'},",
"Azure. :param from_property: Required. The start date of recurrence. :type",
"= None, dataset: Optional[\"QueryDataset\"] = None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs)",
"export has an active schedule, provides an estimate of the",
"order to send to Azure. :param type: Required. The type",
"next_run_time_estimate: ~datetime.datetime \"\"\" _validation = { 'delivery_info': {'required': True}, 'definition':",
"group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of",
":ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param data: :type",
"'operation', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay,",
"column name is allowed. If not provided, then query includes",
"class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must contain a 'type' and 'enabled'",
"'name', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str,",
"None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration =",
"{'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, }",
"} def __init__( self, *, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None,",
":param creation_time: dateTime in which alert was created. :type creation_time:",
"= e_tag class Export(ProxyResource): \"\"\"An export resource. Variables are only",
"{'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly':",
"{'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule':",
"dimension: Optional[\"QueryComparisonExpression\"] = None, tag: Optional[\"QueryComparisonExpression\"] = None, **kwargs ):",
"to class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated with the export. All",
"= None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to",
"to be included in the report. Any valid report column",
"in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation = {",
"values include: \"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param",
"to be used in the query. All required parameters must",
"the aggregation function to use. Possible values include: \"Sum\". :type",
"values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of data present in the",
"time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs ):",
"for data in the export. :param granularity: The granularity of",
"granularity self.configuration = configuration self.aggregation = aggregation self.grouping = grouping",
"\"\"\"The destination information for the delivery of the export. To",
"None, data_set: Optional[\"ExportDataset\"] = None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type",
"{'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag':",
"self.name = None self.type = None self.e_tag = e_tag class",
"= None, creation_time: Optional[str] = None, close_time: Optional[str] = None,",
"names to be included in the query. Any valid query",
"self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.filter",
"): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class",
"self, *, name: str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None, **kwargs",
"**kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model): \"\"\"The",
"properties of the export. Variables are only populated by the",
"~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for the export.",
"to the next page of results. :vartype next_link: str \"\"\"",
"'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName',",
"rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for pulling",
"Optional[\"ErrorDetails\"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error",
"self, *, columns: Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs)",
"where exports will be uploaded. :type root_folder_path: str \"\"\" _validation",
"'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'},",
"~datetime.datetime \"\"\" _validation = { 'from_property': {'required': True}, 'to': {'required':",
"Optional[Union[str, \"AlertStatus\"]] = None, creation_time: Optional[str] = None, close_time: Optional[str]",
"be executed. :type processing_start_time: ~datetime.datetime :param processing_end_time: The time when",
"for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = {",
"delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate = None",
"container where exports will be uploaded. :type container: str :param",
"the export execution. Possible values include: \"OnDemand\", \"Scheduled\". :type execution_type:",
"~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':",
"None, include_fresh_partial_cost: Optional[bool] = None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type",
"'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'}, } def",
"type: Union[str, \"QueryColumnType\"], name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type",
"def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None",
"alias for the aggregated column. Query can have up to",
":param delivery_info: Has delivery information for the export. :type delivery_info:",
"time period must be provided. Possible values include: \"WeekToDate\", \"MonthToDate\",",
"by customer channel (see examples). :type columns: list[str] \"\"\" _attribute_map",
":type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation = { 'id': {'readonly': True},",
"definition: Optional[\"ExportDefinition\"] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"]",
"True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map =",
"= None, unit: Optional[str] = None, current_spend: Optional[float] = None,",
"to update an alert. :param definition: defines the type of",
"type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time frame",
"from_property: ~datetime.datetime :param to: Required. The end date to pull",
"None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type = type self.id =",
"_validation = { 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate':",
"{'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end':",
"= None, scope: Optional[str] = None, chart: Optional[Union[str, \"ChartType\"]] =",
"'unit': {'key': 'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'},",
"{'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__( self, *, granularity:",
"{'key': 'properties.nextLink', 'type': 'str'}, } def __init__( self, *, data:",
"Required. The type of the forecast. Possible values include: \"Usage\",",
"= None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None,",
"pull data to. :type to: ~datetime.datetime \"\"\" _validation = {",
"'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self,",
"str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset configuration. :type",
"users need to register the subscription. For more information see",
"description self.source = source self.details = details self.cost_entity_id = cost_entity_id",
"'type': {'readonly': True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly': True}, }",
"of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation = { 'value':",
"comparison. Possible values include: \"In\", \"Contains\". :type operator: str or",
":param meter_filter: array of meters to filter by. :type meter_filter:",
"*, e_tag: Optional[str] = None, format: Optional[Union[str, \"FormatType\"]] = None,",
"None, description: Optional[str] = None, source: Optional[Union[str, \"AlertSource\"]] = None,",
"for the forecast. If custom, then a specific time period",
"datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to =",
"up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param",
"} _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name':",
"recurrence. :type to: ~datetime.datetime \"\"\" _validation = { 'from_property': {'required':",
"{'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled':",
"'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key':",
"at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The",
"*, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"QueryDatasetConfiguration\"] = None,",
"concurrent update scenario, this field will be used to determine",
"overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts. Variables are only populated",
"configuration: Optional[\"ExportDatasetConfiguration\"] = None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity =",
"Required. The time frame for pulling data for the forecast.",
"management operations. It contains a list of operations and a",
"type self.name = name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by expression",
"Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilter\"] =",
"in the export. Currently only 'Daily' is supported. Possible values",
"resource_filter: list[object] :param meter_filter: array of meters to filter by.",
"{'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly':",
"will be used to determine whether the user is updating",
"picked up to be executed. :type processing_start_time: ~datetime.datetime :param processing_end_time:",
"= not_property self.dimension = dimension self.tag = tag class QueryGrouping(msrest.serialization.Model):",
"scheduled executions it is 'System'. :type submitted_by: str :param submitted_time:",
"*, e_tag: Optional[str] = None, display_name: Optional[str] = None, scope:",
"{'key': 'amount', 'type': 'float'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_spend':",
"activated this alert. :type threshold: float :param operator: operator used",
"the column to use in comparison. :type name: str :param",
"time period must be provided. Possible values include: \"MonthToDate\", \"BillingMonthToDate\",",
"**kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value = None self.next_link = None",
"*, execution_type: Optional[Union[str, \"ExecutionType\"]] = None, status: Optional[Union[str, \"ExecutionStatus\"]] =",
"{'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, }",
"dimensions. It contains a list of available dimensions. Variables are",
"'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting',",
"*, destination: \"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination",
"of an export. Variables are only populated by the server,",
"name self.function = function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name: The",
"self, *, type: Union[str, \"ReportConfigColumnType\"], name: str, **kwargs ): super(ReportConfigGrouping,",
"included in the export. If not provided then the export",
"= { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name',",
"\"\"\" _validation = { 'code': {'readonly': True}, 'message': {'readonly': True},",
"'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, } def",
"*, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property",
":type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group by",
"self.provider = None self.resource = None self.operation = None class",
"schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format",
"schedule, provides an estimate of the next execution time. :vartype",
"in the report. Any valid report column name is allowed.",
"granularity: The granularity of rows in the forecast. Possible values",
"rows in the report. Possible values include: \"Daily\", \"Monthly\". :type",
"status of the export's schedule. If 'Inactive', the export's schedule",
"time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for",
"None, grouping: Optional[List[\"QueryGrouping\"]] = None, filter: Optional[\"QueryFilter\"] = None, **kwargs",
"str :param status: alert status. Possible values include: \"None\", \"Active\",",
":param container: Required. The name of the container where exports",
"= None self.resource = None self.operation = None class OperationListResult(msrest.serialization.Model):",
"def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None",
"'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition: Optional[\"AlertPropertiesDefinition\"]",
"to metric (budget). :type id: str :param enabled: show the",
"code is regenerated. # -------------------------------------------------------------------------- import datetime from typing import",
"the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for",
"~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name of the column to",
"list[object] :param tag_filter: tags to filter by. :type tag_filter: object",
"{'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension':",
"object that represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation",
"error: The details of any error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\"",
"name: Optional[str] = None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type =",
":param dataset: Has definition for data in this query. :type",
"resourceGroups to filter by. :type resource_group_filter: list[object] :param resource_filter: array",
"subscription with the Microsoft.CostManagementExports resource provider. This is required once",
"the report. :param and_property: The logical \"AND\" expression. Must have",
"self.direction = direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start",
"granularity: The granularity of rows in the export. Currently only",
"self.value = None self.next_link = None class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot",
":param function: Required. The name of the aggregation function to",
"'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]] =",
"class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information for the delivery of the",
"import HttpResponseError import msrest.serialization from ._cost_management_client_enums import * class Resource(msrest.serialization.Model):",
"def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None",
"time_grain_type: Type of timegrain cadence. Possible values include: \"None\", \"Monthly\",",
"definition for data in the export. :param granularity: The granularity",
"lost if the code is regenerated. # -------------------------------------------------------------------------- import datetime",
"super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model):",
"in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation = {",
"super(KpiProperties, self).__init__(**kwargs) self.type = type self.id = id self.enabled =",
"expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation =",
"'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and",
"report. The configuration will be ignored if aggregation and grouping",
"being delivered. Currently only 'Csv' is supported. Possible values include:",
":param configuration: The export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\"",
"a request. :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name: str",
"self.dimension = dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group",
"= None class ExportListResult(msrest.serialization.Model): \"\"\"Result of listing exports. It contains",
"include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param",
"FreshPartialCost will be included. :type include_fresh_partial_cost: bool \"\"\" _validation =",
"self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope = scope self.created_on =",
"type: str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource related",
"~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id:",
"'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] =",
"= None, meter_filter: Optional[List[object]] = None, tag_filter: Optional[object] = None,",
"'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost',",
"be included in the query. Any valid query column name",
"created. :type creation_time: str :param close_time: dateTime in which alert",
"sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use in",
"'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key':",
"'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def",
":type current_spend: float :param contact_emails: list of emails to contact.",
"Retry after waiting for the time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\"",
"Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None",
"where exports will be delivered. :type resource_id: str :param container:",
"Request is throttled. Retry after waiting for the time specified",
"'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values',",
"float :param unit: unit of currency being used. :type unit:",
"_attribute_map = { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key':",
"'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key':",
"destination: Required. Has destination for the export being delivered. :type",
"includes all columns. :type columns: list[str] \"\"\" _attribute_map = {",
"'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, } def __init__( self,",
":type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information",
"'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns',",
"resource_id: str :param container: Required. The name of the container",
"values include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\", \"General\", \"xCloud\", \"BudgetForecast\". :type",
"{'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition':",
"of the export. Variables are only populated by the server,",
":type submitted_by: str :param submitted_time: The time when export was",
"filter: Has filter expression to use in the report. :type",
"\"\"\" _validation = { 'provider': {'readonly': True}, 'resource': {'readonly': True},",
"'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'},",
"= None, filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs)",
"list of emails to contact. :type contact_emails: list[str] :param contact_groups:",
"dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each",
"Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope,",
"the report. The configuration will be ignored if aggregation and",
"\"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"ForecastDataset\"] = None, include_actual_cost:",
"periodStartDate. :type period_start_date: str :param triggered_by: notificationId that triggered this",
"name of column. :type name: str :param type: The type",
"Possible values include: \"OnDemand\", \"Scheduled\". :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType",
"granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset configuration.",
"{'required': True}, } _attribute_map = { 'resource_id': {'key': 'resourceId', 'type':",
"dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map = { 'granularity':",
"root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information associated with a export.",
"'ErrorDetails'}, } def __init__( self, *, error: Optional[\"ErrorDetails\"] = None,",
"determine whether the user is updating the latest version or",
"} _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name':",
"up to be executed. :type processing_start_time: ~datetime.datetime :param processing_end_time: The",
"process the incoming request. The reason is provided in the",
"} def __init__( self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property:",
"aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of",
"= configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset configuration. Allows columns",
"Metric to use when displaying costs. Possible values include: \"ActualCost\",",
"dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by expression",
"None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category =",
"'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'},",
"name is allowed. If not provided, then report includes all",
"to aggregate. :type name: str :param function: Required. The name",
"list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get the next set of",
"definition self.description = description self.source = source self.details = details",
"QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used in the query.",
"\"\"\"Result of listing the execution history of an export. Variables",
"'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory',",
"columns: Array of column names to be included in the",
"when sending a request. :ivar value: A list of export",
"Cost management REST API operation. Variables are only populated by",
"'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory',",
"Array of column names to be included in the query.",
"self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period = time_period",
"} def __init__( self, *, display: Optional[\"OperationDisplay\"] = None, **kwargs",
"= None, triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]] = None,",
"'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def",
"the query. Query can have up to 2 group by",
"comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation",
"next_link: str \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"self, *, columns: Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs)",
"the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert",
"least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical",
"'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type':",
"= dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by",
"list of available dimensions. Variables are only populated by the",
"\"\"\"The aggregation expression to be used in the report. All",
"to pull data to. :type to: ~datetime.datetime \"\"\" _validation =",
"or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical \"NOT\" expression. :type not_property:",
":type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use",
"QueryFilter(msrest.serialization.Model): \"\"\"The filter expression to be used in the export.",
"time_period: Has time period for pulling data for the report.",
"be differentiated based on dates. Possible values include: \"Usage\". :type",
"_attribute_map = { 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key':",
"export being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation = {",
"being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation = { 'destination':",
"{'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map",
"ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by expression to be used in the",
"This should only be specified with timeFrame set to 'Custom'.",
"in the dictionary is the alias for the aggregated column.",
"self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties): \"\"\"The properties of the",
"includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}'",
"'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key':",
"Union[str, \"FunctionType\"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function",
":vartype tags: dict[str, str] :param definition: defines the type of",
"'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status': {'key':",
"listing dimensions. It contains a list of available dimensions. Variables",
"is allowed. If not provided, then query includes all columns.",
"None, filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity",
"'[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self,",
"amount. Possible values include: \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\".",
"Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription",
"to filter by. :type resource_filter: list[object] :param meter_filter: array of",
"~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation = { 'type': {'required': True}, 'timeframe': {'required':",
"by. :type resource_filter: list[object] :param meter_filter: array of meters to",
"category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered alert.",
":ivar next_link: URL to get the next set of operation",
"Optional[\"QueryFilter\"] = None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity",
"report. Possible values include: \"Daily\", \"Monthly\". :type granularity: str or",
"'e_tag': {'key': 'eTag', 'type': 'str'}, } def __init__( self, *,",
"and_property: Optional[List[\"ReportConfigFilter\"]] = None, or_property: Optional[List[\"ReportConfigFilter\"]] = None, not_property: Optional[\"ReportConfigFilter\"]",
"True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True},",
"'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'},",
"*, type: Union[str, \"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] =",
"self.filter = filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of a forecast.",
"~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this forecast.",
"filter expression to be used in the export. :param and_property:",
"'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key':",
"The logical \"OR\" expression. Must have at least 2 items.",
"pivots self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe self.time_period = time_period",
"'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details',",
"**kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and end",
"= None self.tags = None class Alert(Resource): \"\"\"An individual alert.",
"{ 'value': {'key': 'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type':",
"'next_link': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',",
"include_fresh_partial_cost: bool \"\"\" _validation = { 'type': {'required': True}, 'timeframe':",
"'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self,",
"_validation = { 'recurrence': {'required': True}, } _attribute_map = {",
"of the report. Usage represents actual usage, forecast represents forecasted",
"information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters must be populated",
"{'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly':",
"to filter by. :type resource_group_filter: list[object] :param resource_filter: array of",
"dataset: Has definition for data in this report config. :type",
"time frame for pulling data for the query. If custom,",
"'values': {'required': True, 'min_items': 1}, } _attribute_map = { 'name':",
"results. :vartype next_link: str \"\"\" _validation = { 'id': {'readonly':",
"behavior and will be lost if the code is regenerated.",
"show in Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots:",
"{'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, }",
"request. :ivar value: The list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension]",
"name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name",
"self.submitted_time = submitted_time self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time self.file_name",
"\"Annually\". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start",
"True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Dimension]'},",
"end date for export data. :type to: ~datetime.datetime \"\"\" _validation",
"The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has",
"an active schedule, provides an estimate of the next execution",
"ignored when sending a request. :ivar value: A list of",
"ViewListResult(msrest.serialization.Model): \"\"\"Result of listing views. It contains a list of",
"The configuration will be ignored if aggregation and grouping are",
"the column to aggregate. :type name: str :param function: Required.",
"accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use when",
"\"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by:",
"None, run_settings: Optional[\"CommonExportProperties\"] = None, error: Optional[\"ErrorDetails\"] = None, **kwargs",
"} def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value =",
"in order to send to Azure. :param resource_id: Required. The",
"self.message = None class ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates that the",
"customer channel (see examples). :type columns: list[str] \"\"\" _attribute_map =",
"forecast. If custom, then a specific time period must be",
"'operation': {'readonly': True}, } _attribute_map = { 'provider': {'key': 'provider',",
"tags. Resource tags. :vartype tags: dict[str, str] \"\"\" _validation =",
"recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation = { 'recurrence': {'required': True}, }",
"'[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type':",
"'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type':",
"= details self.cost_entity_id = cost_entity_id self.status = status self.creation_time =",
"order to send to Azure. :param destination: Required. Has destination",
"message: str \"\"\" _validation = { 'code': {'readonly': True}, 'message':",
"include all available columns. The available columns can vary by",
"'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, \"AlertType\"]]",
"scope: str :ivar created_on: Date the user created this view.",
"'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type':",
"str :param type: The type of column. :type type: str",
"for the aggregated column. forecast can have up to 2",
"{'key': 'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails':",
"'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type':",
"at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The",
"in effect for this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error:",
":type name: str :param operator: Required. The operator to use",
"self).__init__(**kwargs) self.definition = definition self.description = description self.source = source",
"Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilter,",
"'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key':",
"self.error = error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing the execution",
"requested, has the most recent execution history for the export.",
"Note that 'Usage' is equivalent to 'ActualCost' and is applicable",
"None, display_name: Optional[str] = None, scope: Optional[str] = None, chart:",
"under groupings and aggregation. Variables are only populated by the",
"and aggregation. Variables are only populated by the server, and",
"): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope = scope",
"'direction': {'key': 'direction', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'},",
"\"AlertCriteria\"]] = None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type",
"name: str :param display: The object that represents the operation.",
"super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource = None self.operation =",
"Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param",
"'value': {'key': 'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'},",
"the error message. Some Error responses: * 429 TooManyRequests -",
"'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def __init__(",
"the next page of results. :vartype next_link: str \"\"\" _validation",
"timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"QueryDataset\"] =",
"\"Billing\", \"System\". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria",
"of rows in the forecast. Possible values include: \"Daily\". :type",
"provide data for charges or amortization for service reservations. Possible",
"which alert was created. :type creation_time: str :param close_time: dateTime",
"\"\"\"The Resource model definition. Variables are only populated by the",
"'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type':",
"self, *, type: Optional[Union[str, \"AlertType\"]] = None, category: Optional[Union[str, \"AlertCategory\"]]",
"close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time",
"'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category':",
"\"Active\", \"Overridden\", \"Resolved\", \"Dismissed\". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param",
"time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate. :type",
"{'readonly': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'id':",
"'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type':",
"class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing the execution history of an",
"{'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time':",
"Required. The operator to use for comparison. Possible values include:",
"~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation = { 'name': {'readonly': True}, } _attribute_map",
"subscription. When creating an export in the Azure portal, it",
"when sending a request. :ivar id: Resource Id. :vartype id:",
"the export. All required parameters must be populated in order",
"send to Azure. :param type: Required. Has type of the",
"a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression",
"view. Required. :type display_name: str :param scope: Cost Management scope",
"resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get",
"{'key': 'value', 'type': '[ExportExecution]'}, } def __init__( self, **kwargs ):",
"'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__(",
"columns: Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns =",
"type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time frame",
"use in the report. The key of each item in",
"of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get",
":type container: str :param root_folder_path: The name of the directory",
"next execution time. :vartype next_run_time_estimate: ~datetime.datetime \"\"\" _validation = {",
"{'readonly': True}, 'message': {'readonly': True}, } _attribute_map = { 'code':",
"include: \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\". :type chart: str or",
"have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping]",
"'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type':",
"None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag:",
"in the Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type:",
"License. See License.txt in the project root for license information.",
"\"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator",
"Optional[Union[str, \"AlertSource\"]] = None, details: Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id: Optional[str]",
"} _attribute_map = { 'value': {'key': 'value', 'type': '[Dimension]'}, }",
"used in the export. :param and_property: The logical \"AND\" expression.",
"None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None, time_period: Optional[\"ReportConfigTimePeriod\"] = None,",
"able to process the incoming request. The reason is provided",
"bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param data:",
"in which alert was created. :type creation_time: str :param close_time:",
"'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type':",
"configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map = { 'granularity': {'key': 'granularity', 'type':",
"for ExternalSubscription scope. :type scope: str :ivar created_on: Date the",
"None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time:",
"'ExportSchedule'}, } def __init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\",",
"Optional[str] = None, triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]] =",
"values include: \"Active\", \"Inactive\". :type status: str or ~azure.mgmt.costmanagement.models.StatusType :param",
"'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'},",
"\"ReportConfigSortingDirection\"]] = None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction",
"None self.next_link = None class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must contain",
"'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_on':",
"2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has",
"value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation = { 'value': {'readonly': True}, }",
"'value': {'key': 'value', 'type': '[ExportExecution]'}, } def __init__( self, **kwargs",
"next_link: URL to get the next set of operation list",
"\"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type",
"= definition self.run_history = run_history self.next_run_time_estimate = None self.schedule =",
"None self.chart = chart self.accumulated = accumulated self.metric = metric",
"{'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, }",
"'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__(",
"self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and end date",
"= None, filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(QueryDataset, self).__init__(**kwargs)",
"class QueryFilter(msrest.serialization.Model): \"\"\"The filter expression to be used in the",
"*, type: Union[str, \"QueryColumnType\"], name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs)",
"type_properties_query_type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset",
"query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data",
"None class Alert(Resource): \"\"\"An individual alert. Variables are only populated",
"{'readonly': True}, } _attribute_map = { 'value': {'key': 'value', 'type':",
"Grouping enabled. :vartype grouping_enabled: bool :param data: :type data: list[str]",
"class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param time_grain_type: Type of timegrain cadence.",
"__init__( self, *, and_property: Optional[List[\"ReportConfigFilter\"]] = None, or_property: Optional[List[\"ReportConfigFilter\"]] =",
"schedule. If 'Inactive', the export's schedule is paused. Possible values",
"\"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period:",
"\"Invoice\", \"Credit\", \"Quota\", \"General\", \"xCloud\", \"BudgetForecast\". :type type: str or",
"QueryDataset(msrest.serialization.Model): \"\"\"The definition of data present in the query. :param",
"class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of a forecast. All required parameters",
"value: List of cost management operations supported by the Microsoft.CostManagement",
"Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for",
"Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param",
"__init__( self, *, name: str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None,",
"status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which the",
"configuration information for the data in the export. The configuration",
"None self.grouping_enabled = None self.data = data self.total = None",
"None, triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]] = None, resource_filter:",
"expression to use in the query. Query can have up",
"dimensions. Variables are only populated by the server, and will",
"_attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetails'}, } def",
"str :param display: The object that represents the operation. :type",
"_validation = { 'value': {'readonly': True}, } _attribute_map = {",
"to save the view on. This includes 'subscriptions/{subscriptionId}' for subscription",
"None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, filter: Optional[\"QueryFilter\"] = None,",
"\"FunctionType\"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function =",
"tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by expression to be used",
"None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name = name self.type =",
"} _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property':",
"{'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_on': {'readonly':",
"= None self.schedule = schedule class ExportDataset(msrest.serialization.Model): \"\"\"The definition for",
"self.total = None self.category = None self.usage_start = None self.usage_end",
"'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total',",
"self.e_tag = e_tag class Export(ProxyResource): \"\"\"An export resource. Variables are",
"'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key':",
"'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, } def __init__( self,",
"**kwargs ): super(Operation, self).__init__(**kwargs) self.name = None self.display = display",
"'tags': {'key': 'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},",
"the report. Report can have up to 2 group by",
"use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has",
"'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, } def",
"and configurations of Cost Analysis. Variables are only populated by",
"name. :vartype name: str :ivar type: Resource type. :vartype type:",
"def __init__( self, *, next_link: Optional[str] = None, columns: Optional[List[\"QueryColumn\"]]",
"query. The key of each item in the dictionary is",
"usage_end: ~datetime.datetime :ivar next_link: The link (url) to the next",
":type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation = { 'type': {'required': True},",
":ivar value: List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link:",
"{ 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type':",
"self.name = None self.type = None self.tags = None class",
"logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison",
"Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated,",
"list[str] :param contact_groups: list of action groups to broadcast to.",
"is provided in the error message. Some Error responses: *",
"rows in the query. Possible values include: \"Daily\". :type granularity:",
"in the report. :param granularity: The granularity of rows in",
"comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation",
"super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns = columns self.rows =",
"{ 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True},",
":ivar value: The list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\"",
"to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting:",
"for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison",
"name: str :ivar type: Resource type. :vartype type: str :ivar",
"~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to use for",
"True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'},",
"if the code is regenerated. # -------------------------------------------------------------------------- import datetime from",
"'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly': True}, }",
"): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model): \"\"\"Result of",
"__init__( self, *, type: Optional[Union[str, \"PivotType\"]] = None, name: Optional[str]",
"**kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type = type self.name = name",
"None, file_name: Optional[str] = None, run_settings: Optional[\"CommonExportProperties\"] = None, error:",
"export being delivered. Currently only 'Csv' is supported. Possible values",
"= time_period self.dataset = dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost =",
"): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class",
"ignored when sending a request. :ivar id: Resource Id. :vartype",
"self.time_period = time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter",
"'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart',",
"will be delivered. :type resource_id: str :param container: Required. The",
"__init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod,",
"logical \"AND\" expression. Must have at least 2 items. :type",
"execution_type self.status = status self.submitted_by = submitted_by self.submitted_time = submitted_time",
"not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression for a dimension.",
"'type' and 'enabled' key. :param type: KPI type (Forecast, Budget).",
"resource: str :ivar operation: Operation type: Read, write, delete, etc.",
"_attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'recurrence': {'key':",
"True, 'min_items': 1}, } _attribute_map = { 'name': {'key': 'name',",
"Possible values include: \"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType",
"super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of",
"{'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter':",
"known status of the export execution. Possible values include: \"Queued\",",
"display: The object that represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay",
":ivar provider: Service provider: Microsoft.CostManagement. :vartype provider: str :ivar resource:",
"use in the forecast. The key of each item in",
"'type': '[[object]]'}, } def __init__( self, *, next_link: Optional[str] =",
"self).__init__(**kwargs) self.from_property = from_property self.to = to class View(ProxyResource): \"\"\"States",
"{'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type':",
"'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'},",
"self.resource_filter = resource_filter self.meter_filter = meter_filter self.tag_filter = tag_filter self.threshold",
"in Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration",
"for the query. If custom, then a specific time period",
"type self.name = name class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to",
"'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'},",
"'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'},",
"queued to be executed. :type submitted_time: ~datetime.datetime :param processing_start_time: The",
"has an active schedule, provides an estimate of the next",
"comparison. :type name: str :param operator: Required. The operator to",
"(url) to the next page of results. :type next_link: str",
"~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in",
"= None, amount: Optional[float] = None, unit: Optional[str] = None,",
"incoming request. The reason is provided in the error message.",
"~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and end date of the",
"Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, \"AlertStatus\"]]",
"class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for pulling data",
"details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget.",
"'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key':",
"ignored when sending a request. :ivar value: The list of",
"= resource_filter self.meter_filter = meter_filter self.tag_filter = tag_filter self.threshold =",
":param contact_groups: list of action groups to broadcast to. :type",
"overriding_alert: Optional[str] = None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type =",
"**kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration",
"{'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly':",
"ignored when sending a request. :ivar provider: Service provider: Microsoft.CostManagement.",
"'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key':",
"Union[str, \"FunctionType\"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function",
"= None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool] = None,",
"\"FunctionType\"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function =",
"type to show in view. Possible values include: \"Dimension\", \"TagKey\".",
"str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name of the",
"be ignored if aggregation and grouping are provided. :type configuration:",
"configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, grouping:",
"be selected for the export. If not provided then the",
"_validation = { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map",
"dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ):",
"'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'},",
"= None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class",
"datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property",
"to process the incoming request. The reason is provided in",
"self.details = details self.cost_entity_id = cost_entity_id self.status = status self.creation_time",
"self.scope = scope self.created_on = None self.modified_on = None self.chart",
"to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter:",
"dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation = {",
"\"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\",",
"send to Azure. :param direction: Direction of sort. Possible values",
"and forecasted data can be differentiated based on dates. Possible",
"tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property =",
"'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key':",
"will be ignored if aggregation and grouping are provided. :type",
"operation is performed: Dimensions, Query. :vartype resource: str :ivar operation:",
"the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has",
"created_on: Date the user created this view. :vartype created_on: ~datetime.datetime",
"cost management operations supported by the Microsoft.CostManagement resource provider. :vartype",
"= modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model):",
"date range for data in the export. This should only",
"dates. Possible values include: \"Usage\". :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType",
"of alert. Possible values include: \"Preset\", \"User\". :type source: str",
"'tags': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',",
"Optional[List[\"ReportConfigFilter\"]] = None, not_property: Optional[\"ReportConfigFilter\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] =",
"self).__init__(**kwargs) self.from_property = from_property self.to = to class ExportSchedule(msrest.serialization.Model): \"\"\"The",
"used in the report. All required parameters must be populated",
"error message. Some Error responses: * 429 TooManyRequests - Request",
"up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param",
"self.function = function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name: The name",
"Date the user created this view. :vartype created_on: ~datetime.datetime :ivar",
"be populated in order to send to Azure. :param direction:",
"of the error. Variables are only populated by the server,",
"Show costs accumulated over time. Possible values include: \"true\", \"false\".",
"self.metric = metric self.kpis = kpis self.pivots = pivots self.type_properties_query_type",
":type resource_filter: list[object] :param meter_filter: array of meters to filter",
"and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary",
"'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'}, } def __init__( self, *,",
"status self.submitted_by = submitted_by self.submitted_time = submitted_time self.processing_start_time = processing_start_time",
"definition: Required. Has the definition for the export. :type definition:",
"from_property self.to = to class View(ProxyResource): \"\"\"States and configurations of",
"with amount. Possible values include: \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\",",
"The reason is provided in the error message. Some Error",
":type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by",
"resource_group_filter: list[object] :param resource_filter: array of resources to filter by.",
"'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, 'description':",
"\"\"\"The properties of the export. Variables are only populated by",
"type: Data type to show in view. Possible values include:",
":type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression to",
"str \"\"\" _validation = { 'resource_id': {'required': True}, 'container': {'required':",
"error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation = { 'id': {'readonly':",
"= None, status_modification_time: Optional[str] = None, **kwargs ): super(Alert, self).__init__(**kwargs)",
"Union[str, \"QueryColumnType\"], name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type =",
"costs accumulated over time. Possible values include: \"true\", \"false\". :type",
"'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'},",
"{'required': True}, 'function': {'required': True}, } _attribute_map = { 'name':",
"to compare currentSpend with amount. Possible values include: \"None\", \"EqualTo\",",
"Optional[List[\"QueryColumn\"]] = None, rows: Optional[List[List[object]]] = None, **kwargs ): super(QueryResult,",
"sending a request. :ivar provider: Service provider: Microsoft.CostManagement. :vartype provider:",
"\"\"\"The order by expression to be used in the report.",
"'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]] =",
"* 503 ServiceUnavailable - Service is temporarily unavailable. Retry after",
"grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset",
"**kwargs) self.display_name = display_name self.scope = scope self.created_on = None",
"= aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of",
"to send to Azure. :param direction: Direction of sort. Possible",
"'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert',",
"\"\"\"The delivery information associated with a export. All required parameters",
"end date of recurrence. :type to: ~datetime.datetime \"\"\" _validation =",
"bool \"\"\" _validation = { 'type': {'required': True}, 'timeframe': {'required':",
"(R) AutoRest Code Generator. # Changes may cause incorrect behavior",
"a request. :ivar provider: Service provider: Microsoft.CostManagement. :vartype provider: str",
"{'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self, *, granularity:",
"\"\"\"Result of alerts. Variables are only populated by the server,",
"{ 'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type':",
"to: Required. The end date to pull data to. :type",
"list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical \"OR\" expression. Must have at",
"= from_property self.to = to class ForecastDataset(msrest.serialization.Model): \"\"\"The definition of",
"Optional[bool] = None, include_fresh_partial_cost: Optional[bool] = None, **kwargs ): super(ForecastDefinition,",
"metric: Optional[Union[str, \"MetricType\"]] = None, kpis: Optional[List[\"KpiProperties\"]] = None, pivots:",
"= contact_groups self.contact_roles = contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model):",
"{'key': 'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period':",
"\"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required.",
"dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for",
"data for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has",
"\"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param",
"\"AlertCategory\"]] = None, criteria: Optional[Union[str, \"AlertCriteria\"]] = None, **kwargs ):",
"forecasted data can be differentiated based on dates. Possible values",
"for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for",
"sending a request. :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name:",
"\"BillingAnnual\". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of",
"{'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value':",
"): super(KpiProperties, self).__init__(**kwargs) self.type = type self.id = id self.enabled",
"view on. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for",
"query. Possible values include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType",
"self.usage_end = None self.next_link = None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of",
"current_spend self.contact_emails = contact_emails self.contact_groups = contact_groups self.contact_roles = contact_roles",
"**kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model): \"\"\"Result",
"e_tag: Optional[str] = None, format: Optional[Union[str, \"FormatType\"]] = None, delivery_info:",
"a request. All required parameters must be populated in order",
"modified_on: ~datetime.datetime :param chart: Chart type of the main view",
"time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in this",
"self.resource = None self.operation = None class OperationListResult(msrest.serialization.Model): \"\"\"Result of",
":ivar total: Total number of data for the dimension. :vartype",
"type self.id = id self.enabled = enabled class Operation(msrest.serialization.Model): \"\"\"A",
"values: Required. Array of values to use for comparison. :type",
"OnDemand executions it is the user email. For scheduled executions",
"Data field to show in view. :type name: str \"\"\"",
"Has time period for pulling data for the report. :type",
"tag_filter self.threshold = threshold self.operator = operator self.amount = amount",
"Dimension(Resource): \"\"\"Dimension. Variables are only populated by the server, and",
"): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request",
"\"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required.",
"ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of a forecast. All required parameters must",
"str \"\"\" _validation = { 'type': {'required': True}, 'name': {'required':",
"'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(AlertsResult,",
"'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'},",
"the delivery of the export. To allow access to a",
"'type': {'key': 'type', 'type': 'str'}, } def __init__( self, *,",
"'name': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name',",
"The type of the query. Possible values include: \"Usage\", \"ActualCost\",",
":param type: KPI type (Forecast, Budget). Possible values include: \"Forecast\",",
"'type': 'str'}, } def __init__( self, *, name: str, function:",
"link to get the next set of results. Variables are",
"'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key':",
"alias for the aggregated column. Report can have up to",
"The start date for export data. :type from_property: ~datetime.datetime :param",
"{'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status':",
"id: str :param enabled: show the KPI in the UI?.",
"criteria: Optional[Union[str, \"AlertCriteria\"]] = None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type",
"last modified. :type status_modification_time: str \"\"\" _attribute_map = { 'definition':",
"Allows columns to be selected for the export. If not",
"str, operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs)",
"set of alerts results if there are any. :vartype next_link:",
"required parameters must be populated in order to send to",
"API users need to register the subscription. For more information",
"the query. The key of each item in the dictionary",
"name: Operation name: {provider}/{resource}/{operation}. :vartype name: str :param display: The",
"Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload,",
"super(ErrorResponse, self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model): \"\"\"The Resource model",
"'schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, delivery_info: \"ExportDeliveryInfo\",",
":type id: str :param enabled: show the KPI in the",
"self).__init__(**kwargs) self.type = type self.name = name class QueryAggregation(msrest.serialization.Model): \"\"\"The",
"threshold self.operator = operator self.amount = amount self.unit = unit",
"time_period: Has time period for pulling data for the forecast.",
"None, submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime] = None, processing_start_time:",
"name of the directory where exports will be uploaded. :type",
"of the column to group. Possible values include: \"Tag\", \"Dimension\".",
"name: Required. The name of the column to aggregate. :type",
"resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department",
"error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error =",
"List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator =",
"if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param",
"{'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, }",
"*, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]] = None,",
"self.columns = columns class ExportDefinition(msrest.serialization.Model): \"\"\"The definition of an export.",
"the query. All required parameters must be populated in order",
"this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining",
"self.kpis = kpis self.pivots = pivots self.type_properties_query_type = type_properties_query_type self.timeframe",
"of dataset in the query. :param columns: Array of column",
"'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'},",
"filter by. :type resource_group_filter: list[object] :param resource_filter: array of resources",
"'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key':",
"super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled = None self.grouping_enabled =",
":param display: The object that represents the operation. :type display:",
"The object that represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\"",
"{'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'}, }",
"{'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, }",
"column names to be included in the query. Any valid",
"by expression to be used in the query. All required",
"{'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__( self, *, recurrence:",
"creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str]",
"self).__init__(**kwargs) self.name = name self.type = type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The",
"list[str] :param overriding_alert: overriding alert. :type overriding_alert: str \"\"\" _attribute_map",
"self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.sorting",
"'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'},",
"the export. For OnDemand executions it is the user email.",
"schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and end date for recurrence",
"**kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration",
"\"ActualCost\", \"AmortizedCost\", \"AHUB\". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis:",
"send to Azure. :param status: The status of the export's",
"triggered_by self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter self.meter_filter = meter_filter",
"resource_group_filter: array of resourceGroups to filter by. :type resource_group_filter: list[object]",
"): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class",
"meter_filter: array of meters to filter by. :type meter_filter: list[object]",
":param to: Required. The end date for export data. :type",
"status_modification_time: dateTime in which the alert status was last modified.",
"'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total':",
"\"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None,",
"\"QueryColumnType\"], name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type = type",
"tags to filter by. :type tag_filter: object :param threshold: notification",
"'criteria', 'type': 'str'}, } def __init__( self, *, type: Optional[Union[str,",
"\"Monthly\", \"Annually\". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has",
"by. :type meter_filter: list[object] :param tag_filter: tags to filter by.",
"export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the",
"run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ):",
":type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period",
"'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key':",
"available columns. The available columns can vary by customer channel",
"str :param resource_group_filter: array of resourceGroups to filter by. :type",
"'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for",
"be used in the report. :param and_property: The logical \"AND\"",
"estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime \"\"\"",
"the next set of results. Variables are only populated by",
"for data in the export. This should only be specified",
"= None, status: Optional[Union[str, \"AlertStatus\"]] = None, creation_time: Optional[str] =",
"'type': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',",
"Optional[\"ErrorDetails\"] = None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type",
"'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key':",
"None, kpis: Optional[List[\"KpiProperties\"]] = None, pivots: Optional[List[\"PivotProperties\"]] = None, type_properties_query_type:",
"'name': {'required': True}, } _attribute_map = { 'direction': {'key': 'direction',",
"the report. :param columns: Array of column names to be",
"def __init__( self, *, definition: Optional[\"AlertPropertiesDefinition\"] = None, description: Optional[str]",
"Read, write, delete, etc. :vartype operation: str \"\"\" _validation =",
"container: str :param root_folder_path: The name of the directory where",
"= None, criteria: Optional[Union[str, \"AlertCriteria\"]] = None, **kwargs ): super(AlertPropertiesDefinition,",
"values include: \"Preset\", \"User\". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param",
"management operations supported by the Microsoft.CostManagement resource provider. :vartype value:",
"not yet provide data for charges or amortization for service",
"str :param scope: Cost Management scope to save the view",
"str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time frame for",
"have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property:",
"= to class ForecastDataset(msrest.serialization.Model): \"\"\"The definition of data present in",
"send to Azure. :param name: Required. The name of the",
"query includes all columns. :type columns: list[str] \"\"\" _attribute_map =",
"to Azure. :param type: Required. The type of the export.",
"to: ~datetime.datetime \"\"\" _validation = { 'from_property': {'required': True}, 'to':",
"include: \"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has",
"def __init__( self, *, type: Union[str, \"QueryColumnType\"], name: str, **kwargs",
"provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression",
"subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters",
"_validation = { 'name': {'readonly': True}, } _attribute_map = {",
"{'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link':",
"**kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function = function",
"= function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name: The name of",
"def __init__( self, *, type: Union[str, \"ReportConfigColumnType\"], name: str, **kwargs",
"self).__init__(**kwargs) self.id = None self.name = None self.type = None",
"for data in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation",
"'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, } def",
"(c) Microsoft Corporation. All rights reserved. # Licensed under the",
"KPI must contain a 'type' and 'enabled' key. :param type:",
"Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs ): super(ReportConfigDefinition,",
"'iso-8601'}, } def __init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\",",
"'type': 'str'}, 'criteria': {'key': 'criteria', 'type': 'str'}, } def __init__(",
"None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration =",
"Alert(Resource): \"\"\"An individual alert. Variables are only populated by the",
":param name: The name of column. :type name: str :param",
":type include_actual_cost: bool :param include_fresh_partial_cost: a boolean determining if FreshPartialCost",
"= None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id = None self.name",
"{'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type':",
"'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key':",
"'type': 'float'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend',",
"'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type':",
"Optional[bool] = None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type = type",
"'enabled' key. :param type: KPI type (Forecast, Budget). Possible values",
"timeframe self.time_period = time_period self.dataset = dataset self.include_actual_cost = include_actual_cost",
"\"AlertTimeGrainType\"]] = None, period_start_date: Optional[str] = None, triggered_by: Optional[str] =",
"kpis self.pivots = pivots self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe",
"information for the delivery of the export. To allow access",
"be populated in order to send to Azure. :param from_property:",
"dataset: Optional[\"ReportConfigDataset\"] = None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name",
":type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame",
"filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled.",
"def __init__( self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]]",
"the export. To allow access to a storage account, you",
"submitted_by: The identifier for the entity that executed the export.",
"def __init__( self, *, resource_id: str, container: str, root_folder_path: Optional[str]",
"'display', 'type': 'OperationDisplay'}, } def __init__( self, *, display: Optional[\"OperationDisplay\"]",
"'ExportDatasetConfiguration'}, } def __init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]] =",
"and will be ignored when sending a request. All required",
"2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical \"NOT\"",
"history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If",
"self.next_run_time_estimate = None self.schedule = schedule class ExportDataset(msrest.serialization.Model): \"\"\"The definition",
"expression to be used in the report. :param and_property: The",
"processing_start_time: The time when export was picked up to be",
"reason is provided in the error message. Some Error responses:",
"is supported. Possible values include: \"Daily\". :type granularity: str or",
"} def __init__( self, *, recurrence: Union[str, \"RecurrenceType\"], status: Optional[Union[str,",
"'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key':",
"'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'format': {'key':",
"'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key':",
"= None, or_property: Optional[List[\"ReportConfigFilter\"]] = None, not_property: Optional[\"ReportConfigFilter\"] = None,",
"\"DataNotAvailable\". :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier",
":param or_property: The logical \"OR\" expression. Must have at least",
"of tags. Resource tags. :vartype tags: dict[str, str] :ivar description:",
"= None, include_fresh_partial_cost: Optional[bool] = None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs)",
"{'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, }",
"direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and end",
"Possible values include: \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\". :type chart:",
":vartype tags: dict[str, str] :ivar description: Dimension description. :vartype description:",
"information associated with a export. All required parameters must be",
"dateTime in which alert was last modified. :type modification_time: str",
"config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation = { 'type': {'required':",
"self.next_link = next_link self.columns = columns self.rows = rows class",
":param timeframe: Required. The time frame for pulling data for",
"not provided, then report includes all columns. :type columns: list[str]",
"{'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ):",
"ExportListResult(msrest.serialization.Model): \"\"\"Result of listing exports. It contains a list of",
"to get the next set of operation list results if",
"period_start_date self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter",
"{'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, }",
"= { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name',",
"self, *, execution_type: Optional[Union[str, \"ExecutionType\"]] = None, status: Optional[Union[str, \"ExecutionStatus\"]]",
"to be executed. :type submitted_time: ~datetime.datetime :param processing_start_time: The time",
"Has definition for data in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset",
"close_time: dateTime in which alert was closed. :type close_time: str",
"order to send to Azure. :param from_property: Required. The start",
"None, status_modification_time: Optional[str] = None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition",
"dict[str, str] :param next_link: The link (url) to the next",
"by expression to use in the report. Report can have",
"= None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload to update an",
"__init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link",
"{ 'type': {'required': True}, 'name': {'required': True}, } _attribute_map =",
"__init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]]",
"for comparison. Possible values include: \"In\", \"Contains\". :type operator: str",
"= None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category",
"submitted_time self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time self.file_name = file_name",
"differentiated based on dates. Possible values include: \"Usage\". :type type_properties_query_type:",
"Optional[Dict[str, \"QueryAggregation\"]] = None, grouping: Optional[List[\"QueryGrouping\"]] = None, filter: Optional[\"QueryFilter\"]",
"= None, time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDataset\"] = None,",
"class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name: The name of column. :type",
"= data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information for the delivery",
"Required. The start date of recurrence. :type from_property: ~datetime.datetime :param",
"Optional[\"ReportConfigDataset\"] = None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name =",
"must contain a 'type' and 'name'. :param type: Data type",
"_attribute_map = { 'value': {'key': 'value', 'type': '[Alert]'}, 'next_link': {'key':",
"alert status was last modified. :type status_modification_time: str \"\"\" _validation",
"Budget). Possible values include: \"Forecast\", \"Budget\". :type type: str or",
"= None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format",
"or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset configuration. :type configuration:",
"include: \"Usage\". :type type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required.",
"{'required': True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map =",
"~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression for a dimension. :type",
"tags: A set of tags. Resource tags. :vartype tags: dict[str,",
"= sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of",
":param name: Required. The name of the column to use",
"request. :ivar id: Resource Id. :vartype id: str :ivar name:",
":param criteria: Criteria that triggered alert. Possible values include: \"CostThresholdExceeded\",",
"include: \"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name:",
"processing_start_time: ~datetime.datetime :param processing_end_time: The time when the export execution",
"schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation",
"that represents the operation. Variables are only populated by the",
"'{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns', 'type':",
"self).__init__(**kwargs) self.from_property = from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The",
"'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'},",
"'str'}, } def __init__( self, *, resource_id: str, container: str,",
"~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group by expression to use",
"'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key':",
"of tags. Resource tags. :vartype tags: dict[str, str] :param execution_type:",
"class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type of alert. :param type: type",
":type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation = { 'id': {'readonly': True},",
"'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def",
"save the view on. This includes 'subscriptions/{subscriptionId}' for subscription scope,",
"the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for",
"super(ProxyResource, self).__init__(**kwargs) self.id = None self.name = None self.type =",
"'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'",
"be populated in order to send to Azure. :param format:",
"\"\"\"The export dataset configuration. Allows columns to be selected for",
"dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining if actualCost will",
":vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation = { 'value': {'readonly': True},",
"API operation. Variables are only populated by the server, and",
"recurrence schedule. All required parameters must be populated in order",
"\"FormatType\"]] = None, delivery_info: Optional[\"ExportDeliveryInfo\"] = None, definition: Optional[\"ExportDefinition\"] =",
"= None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name",
"unit: Optional[str] = None, current_spend: Optional[float] = None, contact_emails: Optional[List[str]]",
"are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation",
"**kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration",
"Microsoft.CostManagement. :vartype provider: str :ivar resource: Resource on which the",
"tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation = { 'and_property': {'min_items': 2}, 'or_property':",
"granularity: The granularity of rows in the query. Possible values",
"The time frame for pulling data for the export. If",
"Optional[\"ExportExecutionListResult\"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format",
"self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name =",
"to Azure. :param format: The format of the export being",
"pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of the report. Usage",
"Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param",
"'type': 'str'}, 'category': {'key': 'category', 'type': 'str'}, 'criteria': {'key': 'criteria',",
"None, chart: Optional[Union[str, \"ChartType\"]] = None, accumulated: Optional[Union[str, \"AccumulatedType\"]] =",
"None self.usage_start = None self.usage_end = None self.next_link = None",
"of tags. Resource tags. :vartype tags: dict[str, str] \"\"\" _validation",
"the export. Currently only 'Daily' is supported. Possible values include:",
"= status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type",
"creation_time: dateTime in which alert was created. :type creation_time: str",
"after waiting for the time specified in the \"Retry-After\" header.",
"\"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\"",
"from. :type from_property: ~datetime.datetime :param to: Required. The end date",
"recurrence: Union[str, \"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]] = None, recurrence_period: Optional[\"ExportRecurrencePeriod\"]",
":param resource_id: Required. The resource id of the storage account",
"of alerts. Variables are only populated by the server, and",
"{'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type':",
"{'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly':",
"'min_items': 1}, } _attribute_map = { 'name': {'key': 'name', 'type':",
"type: str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field to show",
"a report config. All required parameters must be populated in",
"Resource type. :vartype type: str :param e_tag: eTag of the",
"used to determine whether the user is updating the latest",
"execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The last known status",
"ignored when sending a request. :ivar code: Error code. :vartype",
"= None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None,",
"recurrence. Possible values include: \"Daily\", \"Weekly\", \"Monthly\", \"Annually\". :type recurrence:",
"2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'},",
"use in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map =",
"_validation = { 'type': {'required': True}, 'timeframe': {'required': True}, }",
"= to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used",
"= type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used",
"'function': {'key': 'function', 'type': 'str'}, } def __init__( self, *,",
"= columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and",
"Possible values include: \"Usage\". :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param",
"None, dataset: Optional[\"QueryDataset\"] = None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type",
"None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name =",
"Required. Has the definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition",
"str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and end date",
"forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map = { 'granularity': {'key':",
"waiting for the time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. *",
"the exported file. :type file_name: str :param run_settings: The export",
"dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group by expression to",
"from_property: Required. The start date to pull data from. :type",
"tag_filter: object :param threshold: notification threshold percentage as a decimal",
"): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info = delivery_info",
"self.from_property = from_property self.to = to class View(ProxyResource): \"\"\"States and",
"str :param run_settings: The export settings that were in effect",
":ivar value: The list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\"",
"(see examples). :type columns: list[str] \"\"\" _attribute_map = { 'columns':",
"{'key': 'error', 'type': 'ErrorDetails'}, } def __init__( self, *, error:",
"'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key':",
"metric (budget). :type id: str :param enabled: show the KPI",
"contact. :type contact_emails: list[str] :param contact_groups: list of action groups",
"processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str]",
"from_property: Required. The start date of recurrence. :type from_property: ~datetime.datetime",
"Has configuration information for the data in the export. The",
"_attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key':",
"\"\"\"The definition for data in the export. :param granularity: The",
"2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has",
"'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__( self, *,",
"updating the latest version or not. :type e_tag: str :param",
":type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of",
"budget. :type cost_entity_id: str :param status: alert status. Possible values",
"\"Forecast\", \"Budget\". :type type: str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID",
"tags. Resource tags. :vartype tags: dict[str, str] :param execution_type: The",
"version supports subscription lowest possible grain. :type name: str \"\"\"",
"cost management operations. It contains a list of operations and",
"self.type = type self.timeframe = timeframe self.time_period = time_period self.dataset",
"values include: \"None\", \"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type",
"'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'},",
"Optional[Union[str, \"AlertType\"]] = None, category: Optional[Union[str, \"AlertCategory\"]] = None, criteria:",
"'properties.rows', 'type': '[[object]]'}, } def __init__( self, *, next_link: Optional[str]",
"will be ignored when sending a request. :ivar value: List",
"the service is not able to process the incoming request.",
"custom, then a specific time period must be provided. Possible",
"\"Inactive\". :type status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The",
"'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key':",
"= None, close_time: Optional[str] = None, modification_time: Optional[str] = None,",
"to Azure. :param name: Required. The name of the column",
"= processing_start_time self.processing_end_time = processing_end_time self.file_name = file_name self.run_settings =",
"self.run_history = run_history self.next_run_time_estimate = None class Dimension(Resource): \"\"\"Dimension. Variables",
"contact_groups: list[str] :param contact_roles: list of contact roles. :type contact_roles:",
"\"AND\" expression. Must have at least 2 items. :type and_property:",
"{'key': 'value', 'type': '[Dimension]'}, } def __init__( self, **kwargs ):",
"_attribute_map = { 'value': {'key': 'value', 'type': '[Dimension]'}, } def",
"*, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property",
"recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar",
"include: \"Cost\", \"Usage\", \"Billing\", \"System\". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory",
"super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date = period_start_date self.triggered_by =",
"= criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param time_grain_type: Type of",
"operator: operator used to compare currentSpend with amount. Possible values",
"was picked up to be executed. :type processing_start_time: ~datetime.datetime :param",
"~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression to use in the",
"or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule recurrence. Possible values",
"self.id = id self.enabled = enabled class Operation(msrest.serialization.Model): \"\"\"A Cost",
"available exports in the scope provided. Variables are only populated",
"total: int :ivar category: Dimension category. :vartype category: str :ivar",
"is required once per subscription. When creating an export in",
"\"ExecutionStatus\"]] = None, submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime] =",
"= grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model):",
"{'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter':",
"{'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter':",
"view. :vartype created_on: ~datetime.datetime :ivar modified_on: Date when the user",
"list[str] :param contact_roles: list of contact roles. :type contact_roles: list[str]",
"= None, id: Optional[str] = None, enabled: Optional[bool] = None,",
"~datetime.datetime :param to: Required. The end date for export data.",
"'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def",
"'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'},",
"'value', 'type': '[Export]'}, } def __init__( self, **kwargs ): super(ExportListResult,",
"query. All required parameters must be populated in order to",
"Azure. :param from_property: Required. The start date to pull data",
"export. :param granularity: The granularity of rows in the export.",
"= None, type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]]",
"of aggregation expression to use in the query. The key",
":vartype code: str :ivar message: Error message indicating why the",
"{provider}/{resource}/{operation}. :vartype name: str :param display: The object that represents",
"sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of data",
"The end date to pull data to. :type to: ~datetime.datetime",
"or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs to show in",
"for pulling data for the query. All required parameters must",
"{'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset':",
":param not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param",
":type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id: str",
"'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error',",
"all columns listed under groupings and aggregation. Variables are only",
"of each item in the dictionary is the alias for",
"to a storage account, you must register the account's subscription",
"in the UI?. :type enabled: bool \"\"\" _attribute_map = {",
"rows: Array of rows. :type rows: list[list[object]] \"\"\" _validation =",
"'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key':",
"need to register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services",
"the export's schedule. If 'Inactive', the export's schedule is paused.",
"to class View(ProxyResource): \"\"\"States and configurations of Cost Analysis. Variables",
"= None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error class",
"e_tag class Export(ProxyResource): \"\"\"An export resource. Variables are only populated",
"the column to group. Possible values include: \"Tag\", \"Dimension\". :type",
"must be provided. Possible values include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\".",
"{'key': 'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope':",
"\"\"\"defines the type of alert. :param type: type of alert.",
"self, *, e_tag: Optional[str] = None, format: Optional[Union[str, \"FormatType\"]] =",
"comparison expression to be used in the query. All required",
"alerts results if there are any. :vartype next_link: str \"\"\"",
"'category', 'type': 'str'}, 'criteria': {'key': 'criteria', 'type': 'str'}, } def",
"will be ignored when sending a request. :ivar provider: Service",
"entity that executed the export. For OnDemand executions it is",
"or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The last known status of the",
"'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, } def",
"'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationListResult,",
"self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and end date",
"column to group. This version supports subscription lowest possible grain.",
"display_name: User input name of the view. Required. :type display_name:",
"_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key':",
"time frame for pulling data for the export. If custom,",
"the view. Required. :type display_name: str :param scope: Cost Management",
"date of recurrence. :type to: ~datetime.datetime \"\"\" _validation = {",
"str, container: str, root_folder_path: Optional[str] = None, **kwargs ): super(ExportDeliveryDestination,",
"Array of order by expression to use in the report.",
"and will be ignored when sending a request. :ivar value:",
"The time frame for pulling data for the query. If",
"self.submitted_by = submitted_by self.submitted_time = submitted_time self.processing_start_time = processing_start_time self.processing_end_time",
"type of the export execution. Possible values include: \"OnDemand\", \"Scheduled\".",
"Optional[str] = None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type = type",
"} def __init__( self, *, e_tag: Optional[str] = None, display_name:",
"alert. Variables are only populated by the server, and will",
"'str'}, } def __init__( self, *, type: Union[str, \"QueryColumnType\"], name:",
"currentSpend with amount. Possible values include: \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\",",
"enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled:",
"str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation = { 'name': {'required': True},",
"key of each item in the dictionary is the alias",
"use in the query. The key of each item in",
"type of column. :type type: str \"\"\" _attribute_map = {",
"definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param",
"= None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description",
"Possible values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\",",
":type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression to use",
"_validation = { 'name': {'required': True}, } _attribute_map = {",
"time. :vartype next_run_time_estimate: ~datetime.datetime \"\"\" _validation = { 'delivery_info': {'required':",
"include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str",
"self, *, error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs)",
"'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key':",
"time_period: Has time period for pulling data for the export.",
"{'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag':",
"for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile",
"include_actual_cost: a boolean determining if actualCost will be included. :type",
"for data in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation",
"report. Any valid report column name is allowed. If not",
"super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name = name class ReportConfigSorting(msrest.serialization.Model):",
"class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must contain a 'type' and 'name'.",
"= None, type: Optional[str] = None, **kwargs ): super(QueryColumn, self).__init__(**kwargs)",
":param destination: Required. Has destination for the export being delivered.",
"aggregation: Dictionary of aggregation expression to use in the query.",
"'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type':",
"why the operation failed. :vartype message: str \"\"\" _validation =",
"of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows.",
"None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs",
"'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'},",
"current_spend: Optional[float] = None, contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]]",
"'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self, *, granularity: Optional[Union[str,",
"to Azure. :param type: Required. The type of the query.",
"self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time",
"True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'},",
"with the export. All required parameters must be populated in",
"'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def",
"Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ): super(Alert,",
"from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to",
"include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must contain",
"'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'},",
"ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range for data in the export. This",
"be delivered. :type resource_id: str :param container: Required. The name",
"str \"\"\" _attribute_map = { 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'},",
"self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link =",
"greater than start date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation =",
"values include: \"Dimension\", \"TagKey\". :type type: str or ~azure.mgmt.costmanagement.models.PivotType :param",
"provided, then report includes all columns. :type columns: list[str] \"\"\"",
"'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold', 'type': 'float'},",
"Optional[\"ExportSchedule\"] = None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history,",
"value: A list of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\"",
"'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'},",
"payload to update an alert. :param definition: defines the type",
"None, threshold: Optional[float] = None, operator: Optional[Union[str, \"AlertOperator\"]] = None,",
"status was last modified. :type status_modification_time: str \"\"\" _attribute_map =",
"**kwargs ): super(Alert, self).__init__(**kwargs) self.definition = definition self.description = description",
"indicating why the operation failed. :vartype message: str \"\"\" _validation",
"'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, } def __init__( self, *,",
"aggregation: Dictionary of aggregation expression to use in the forecast.",
":type scope: str :ivar created_on: Date the user created this",
"will be ignored when sending a request. :ivar id: Resource",
"{'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map",
"run_settings: The export settings that were in effect for this",
"history of an export. Variables are only populated by the",
"'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type':",
"include: \"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type status:",
"'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key':",
"*, data: Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description",
"'operator': {'key': 'operator', 'type': 'str'}, 'amount': {'key': 'amount', 'type': 'float'},",
"{'key': 'category', 'type': 'str'}, 'criteria': {'key': 'criteria', 'type': 'str'}, }",
":ivar modified_on: Date when the user last modified this view.",
":type data: list[str] :ivar total: Total number of data for",
"to sort. :type name: str \"\"\" _validation = { 'name':",
":type e_tag: str :param display_name: User input name of the",
"name: Required. The name of the column to group. :type",
"sending a request. :ivar value: A list of export executions.",
"Optional[str] = None, source: Optional[Union[str, \"AlertSource\"]] = None, details: Optional[\"AlertPropertiesDetails\"]",
":type status_modification_user_name: str :param status_modification_time: dateTime in which the alert",
"type of alert. :param type: type of alert. Possible values",
"None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe =",
"submitted_by self.submitted_time = submitted_time self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time",
"class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information associated with a export. All",
"include: \"None\", \"Active\", \"Overridden\", \"Resolved\", \"Dismissed\". :type status: str or",
"Optional[str] = None, close_time: Optional[str] = None, modification_time: Optional[str] =",
"~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period for pulling data for",
"None, enabled: Optional[bool] = None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type",
"'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source',",
"'Csv' is supported. Possible values include: \"Csv\". :type format: str",
"'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map = {",
"= dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression",
"2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array",
"of 3 sub-views in the Cost Analysis UI. :type pivots:",
"forecast. Possible values include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType",
":param aggregation: Dictionary of aggregation expression to use in the",
"~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration",
"~datetime.datetime :param schedule: Has schedule information for the export. :type",
"'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type':",
"definition of data present in the forecast. :param granularity: The",
"'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'format': {'key': 'properties.format', 'type':",
"'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type':",
"set of operation list results if there are any. :vartype",
"Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(Export,",
"{ 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type':",
"_attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key':",
"Required. The time frame for pulling data for the report.",
":type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'delivery_info': {'required': True},",
"= None self.e_tag = e_tag class Export(ProxyResource): \"\"\"An export resource.",
"} def __init__( self, *, next_link: Optional[str] = None, columns:",
"'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key':",
"and_property: Optional[List[\"QueryFilter\"]] = None, or_property: Optional[List[\"QueryFilter\"]] = None, not_property: Optional[\"QueryFilter\"]",
"'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, } def __init__( self,",
"to use for comparison. Possible values include: \"In\", \"Contains\". :type",
"determining if actualCost will be included. :type include_actual_cost: bool :param",
"the alias for the aggregated column. forecast can have up",
"allowed. If not provided, then query includes all columns. :type",
"Alert description. :type description: str :param source: Source of alert.",
"data in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation =",
"of aggregation expression to use in the report. The key",
"'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__( self,",
"'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type':",
"\"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time",
"then report includes all columns. :type columns: list[str] \"\"\" _attribute_map",
"} def __init__( self, *, columns: Optional[List[str]] = None, **kwargs",
"PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must contain a 'type' and 'name'. :param",
"\"BudgetForecast\". :type type: str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category.",
"include_fresh_partial_cost: Optional[bool] = None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type =",
"contains a list of operations and a URL link to",
"The granularity of rows in the export. Currently only 'Daily'",
"'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter',",
"'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key':",
"or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the data",
"'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key':",
"QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for pulling data for",
"be ignored when sending a request. :ivar value: List of",
"eTag of the resource. To handle concurrent update scenario, this",
"{ 'value': {'key': 'value', 'type': '[Dimension]'}, } def __init__( self,",
"details self.cost_entity_id = cost_entity_id self.status = status self.creation_time = creation_time",
"\"\"\" _validation = { 'name': {'required': True}, 'function': {'required': True},",
"super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model):",
"data for the report. If custom, then a specific time",
"= None, current_spend: Optional[float] = None, contact_emails: Optional[List[str]] = None,",
"tags: dict[str, str] \"\"\" _validation = { 'id': {'readonly': True},",
"'[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold', 'type':",
"'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension',",
"Possible values include: \"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param",
"be populated in order to send to Azure. :param resource_id:",
"__init__( self, *, type: Union[str, \"QueryColumnType\"], name: str, **kwargs ):",
"*, type: Optional[Union[str, \"KpiType\"]] = None, id: Optional[str] = None,",
"data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation",
"name: Data field to show in view. :type name: str",
"name: str \"\"\" _validation = { 'type': {'required': True}, 'name':",
"category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param time_grain_type:",
"{'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles':",
":param dimension: Has comparison expression for a dimension. :type dimension:",
"'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key':",
"'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__(",
"self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class",
"send to Azure. :param type: Required. The type of the",
"category. Possible values include: \"Cost\", \"Usage\", \"Billing\", \"System\". :type category:",
"not provided then the export will include all available columns.",
"unit: str :param current_spend: current spend. :type current_spend: float :param",
"'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type':",
"the most recent execution history for the export. :type run_history:",
"and_property: The logical \"AND\" expression. Must have at least 2",
"): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation",
"ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the report. :param columns:",
"None, grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter:",
"'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type':",
"'enabled': {'key': 'enabled', 'type': 'bool'}, } def __init__( self, *,",
"str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field to show in",
"\"Contains\". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array",
"self.error = error class ProxyResource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables",
"to determine whether the user is updating the latest version",
"= type self.name = name class QueryResult(Resource): \"\"\"Result of query.",
"expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter]",
"None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration =",
"'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self, *,",
"None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property",
"is equivalent to 'ActualCost' and is applicable to exports that",
"'str'}, 'id': {'key': 'id', 'type': 'str'}, 'enabled': {'key': 'enabled', 'type':",
"'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, } def __init__( self, *,",
"filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the report.",
":vartype category: str :ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime",
"request. :ivar provider: Service provider: Microsoft.CostManagement. :vartype provider: str :ivar",
"version or not. :type e_tag: str :param format: The format",
"HttpResponseError import msrest.serialization from ._cost_management_client_enums import * class Resource(msrest.serialization.Model): \"\"\"The",
"from ._cost_management_client_enums import * class Resource(msrest.serialization.Model): \"\"\"The Resource model definition.",
"super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category = category self.criteria =",
"pivot must contain a 'type' and 'name'. :param type: Data",
"next set of operation list results if there are any.",
"the code is regenerated. # -------------------------------------------------------------------------- import datetime from typing",
"ignored when sending a request. All required parameters must be",
"self.columns = columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start",
"{ 'type': {'key': 'type', 'type': 'str'}, 'id': {'key': 'id', 'type':",
"It contains all columns listed under groupings and aggregation. Variables",
":type file_name: str :param run_settings: The export settings that were",
"for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection",
"to use in the query. Query can have up to",
"str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs)",
"ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope: str",
"recurrence: Required. The schedule recurrence. Possible values include: \"Daily\", \"Weekly\",",
"The name of the column to use in comparison. :type",
"Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param",
"Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name: Optional[str] =",
"of resource related to metric (budget). :type id: str :param",
"Optional[List[\"QueryFilter\"]] = None, or_property: Optional[List[\"QueryFilter\"]] = None, not_property: Optional[\"QueryFilter\"] =",
"is performed: Dimensions, Query. :vartype resource: str :ivar operation: Operation",
"get the next set of results. Variables are only populated",
"The export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map =",
"populated in order to send to Azure. :param destination: Required.",
"to send to Azure. :param type: Required. Has type of",
":param operator: operator used to compare currentSpend with amount. Possible",
"Possible values include: \"None\", \"Active\", \"Overridden\", \"Resolved\", \"Dismissed\". :type status:",
"'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'},",
"'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key':",
"of the column to use in comparison. :type name: str",
"type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name of",
"dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression to use in",
"the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map = { 'granularity':",
"InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount",
"'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format': {'key': 'format',",
"**kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info =",
"'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self,",
":param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included.",
"= None, data_set: Optional[\"ExportDataset\"] = None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs)",
"when export was queued to be executed. :type submitted_time: ~datetime.datetime",
"type: Read, write, delete, etc. :vartype operation: str \"\"\" _validation",
"None self.name = None self.type = None self.e_tag = e_tag",
"and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical \"OR\" expression. Must have",
"str, function: Union[str, \"FunctionType\"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name =",
"AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type of alert. :param type: type of",
"Optional[\"ExportDatasetConfiguration\"] = None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity",
"delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation = { 'destination': {'required':",
"code: Error code. :vartype code: str :ivar message: Error message",
"__init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration,",
"True}, 'tags': {'readonly': True}, } _attribute_map = { 'id': {'key':",
"comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag:",
"= { 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo',",
"for the data in the export. The configuration will be",
"self.value = None self.next_link = None class CommonExportProperties(msrest.serialization.Model): \"\"\"The common",
":type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation = { 'name': {'readonly': True},",
"{'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format':",
"'[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type':",
"exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation = { 'value': {'readonly':",
":type type: str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field to",
"group. This version supports subscription lowest possible grain. :type name:",
"'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self, *,",
"'[str]'}, } def __init__( self, *, columns: Optional[List[str]] = None,",
"definition for data in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\"",
"grouping: Optional[List[\"QueryGrouping\"]] = None, filter: Optional[\"QueryFilter\"] = None, **kwargs ):",
"= { 'value': {'readonly': True}, } _attribute_map = { 'value':",
"'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = {",
"in future. If present, the end date must be greater",
"super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info = delivery_info self.definition",
"{'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *, definition:",
"__init__( self, *, type: Optional[Union[str, \"KpiType\"]] = None, id: Optional[str]",
"self.next_link = None class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must contain a",
"description. :type description: str :param source: Source of alert. Possible",
"Union[str, \"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset:",
"\"\"\"The date range for data in the export. This should",
"True}, 'type': {'readonly': True}, } _attribute_map = { 'id': {'key':",
"None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition = definition self.description =",
"= type self.name = name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by",
"'str'}, } def __init__( self, *, definition: Optional[\"AlertPropertiesDefinition\"] = None,",
"str, root_folder_path: Optional[str] = None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id",
"'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart',",
"is 3 months. All required parameters must be populated in",
"not_property self.dimension = dimension self.tag = tag class QueryGrouping(msrest.serialization.Model): \"\"\"The",
"'object'}, 'threshold': {'key': 'threshold', 'type': 'float'}, 'operator': {'key': 'operator', 'type':",
"~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str :param source:",
"Optional[\"OperationDisplay\"] = None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = None",
"columns: list[str] \"\"\" _attribute_map = { 'columns': {'key': 'columns', 'type':",
"'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime',",
"= { 'error': {'key': 'error', 'type': 'ErrorDetails'}, } def __init__(",
"report. Usage represents actual usage, forecast represents forecasted data and",
"Optional[\"QueryFilter\"] = None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity",
"'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key':",
"'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation',",
"list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows. :type rows: list[list[object]] \"\"\"",
"aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group by expression",
"._cost_management_client_enums import * class Resource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables",
"temporarily unavailable. Retry after waiting for the time specified in",
"license information. # Code generated by Microsoft (R) AutoRest Code",
"of group by expression to use in the query. Query",
"__init__( self, *, and_property: Optional[List[\"QueryFilter\"]] = None, or_property: Optional[List[\"QueryFilter\"]] =",
"Date when the user last modified this view. :vartype modified_on:",
"at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The",
"specified with timeFrame set to 'Custom'. The maximum date range",
"information for the data in the export. The configuration will",
"views. It contains a list of available views. Variables are",
"of a forecast. All required parameters must be populated in",
"Union[str, \"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]] = None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] =",
"= { 'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly':",
"to send to Azure. :param type: Required. The type of",
"or_property: The logical \"OR\" expression. Must have at least 2",
"= tag class QueryGrouping(msrest.serialization.Model): \"\"\"The group by expression to be",
"'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__(",
"present in the query. :param granularity: The granularity of rows",
"'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter',",
"alert. :param definition: defines the type of alert. :type definition:",
"supported. Possible values include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType",
"True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map =",
"'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension',",
"= None, grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] = None,",
"= filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of data present in",
":type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount.",
"created this view. :vartype created_on: ~datetime.datetime :ivar modified_on: Date when",
"_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key':",
"name: str :param operator: Required. The operator to use for",
"super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period =",
"} _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link':",
"= status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type of alert. :param",
"): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination class ExportExecution(Resource): \"\"\"An export",
"None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type = type self.name =",
"resource_id self.container = container self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The",
"\"Usage\". :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time",
"super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model):",
"'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd',",
"= None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs)",
"the Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL",
"): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period",
"include: \"None\", \"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type time_grain_type:",
"= resource_group_filter self.resource_filter = resource_filter self.meter_filter = meter_filter self.tag_filter =",
"to send to Azure. :param format: The format of the",
"time period for pulling data for the report. :type time_period:",
"= None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None,",
"of recurrence. :type from_property: ~datetime.datetime :param to: The end date",
"= function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used",
"This version supports subscription lowest possible grain. :type name: str",
"filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map = { 'granularity': {'key': 'granularity', 'type':",
"If not provided, then report includes all columns. :type columns:",
"datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to =",
"name of the column to group. :type name: str \"\"\"",
"_attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key':",
"{ 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map =",
":param amount: budget threshold amount. :type amount: float :param unit:",
"self.or_property = or_property self.not_property = not_property self.dimension = dimension self.tag",
"{'key': 'resourceId', 'type': 'str'}, 'container': {'key': 'container', 'type': 'str'}, 'root_folder_path':",
"'[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self,",
"self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression to be",
"Currently only 'Daily' is supported. Possible values include: \"Daily\". :type",
"'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key':",
"displaying costs. Possible values include: \"ActualCost\", \"AmortizedCost\", \"AHUB\". :type metric:",
":param recurrence: Required. The schedule recurrence. Possible values include: \"Daily\",",
"time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None, period_start_date: Optional[str] = None, triggered_by:",
"the dictionary is the alias for the aggregated column. Query",
":param values: Required. Array of values to use for comparison.",
"**kwargs) self.format = format self.delivery_info = delivery_info self.definition = definition",
"report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation = { 'grouping': {'max_items':",
"\"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria",
"operator to use for comparison. Possible values include: \"In\", \"Contains\".",
"\"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has",
"= None, period_start_date: Optional[str] = None, triggered_by: Optional[str] = None,",
"{'key': 'operation', 'type': 'str'}, } def __init__( self, **kwargs ):",
"of cost management operations supported by the Microsoft.CostManagement resource provider.",
"None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = None self.display =",
"aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None, grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting:",
"\"\"\"An individual alert. Variables are only populated by the server,",
"Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or",
"\"\"\"Result of query. It contains all columns listed under groupings",
":vartype next_link: str \"\"\" _validation = { 'id': {'readonly': True},",
"request. :ivar value: The list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View]",
"self.sorting = sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration",
"self).__init__(**kwargs) self.status = status self.recurrence = recurrence self.recurrence_period = recurrence_period",
"or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for the",
"percentage as a decimal which activated this alert. :type threshold:",
"status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The details of the error. Variables are",
"Optional[str] = None, run_settings: Optional[\"CommonExportProperties\"] = None, error: Optional[\"ErrorDetails\"] =",
"= name self.operator = operator self.values = values class QueryDataset(msrest.serialization.Model):",
"alert. :type triggered_by: str :param resource_group_filter: array of resourceGroups to",
"} def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code =",
"applicable to exports that do not yet provide data for",
"category: Alert category. Possible values include: \"Cost\", \"Usage\", \"Billing\", \"System\".",
"{'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, }",
"None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info =",
"'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'},",
":param recurrence_period: Has start and end date of the recurrence.",
"of rows. :type rows: list[list[object]] \"\"\" _validation = { 'id':",
"the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for",
"Resource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are only populated by",
"description: Optional[str] = None, source: Optional[Union[str, \"AlertSource\"]] = None, details:",
":vartype grouping_enabled: bool :param data: :type data: list[str] :ivar total:",
"'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type':",
"and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary",
"name: Required. The name of the column to group. This",
"{'readonly': True}, 'operation': {'readonly': True}, } _attribute_map = { 'provider':",
"granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"ExportDatasetConfiguration\"] = None, **kwargs",
"~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name of the column to",
"'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key':",
"waiting for the time specified in the \"Retry-After\" header. :param",
":type meter_filter: list[object] :param tag_filter: tags to filter by. :type",
"True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_on': {'readonly': True},",
"typing import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError",
"Possible values include: \"Forecast\", \"Budget\". :type type: str or ~azure.mgmt.costmanagement.models.KpiType",
"next page of results. :type next_link: str :param columns: Array",
"~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most recent execution",
"supports subscription lowest possible grain. :type name: str \"\"\" _validation",
"'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self, *,",
"file_name: Optional[str] = None, run_settings: Optional[\"CommonExportProperties\"] = None, error: Optional[\"ErrorDetails\"]",
"Possible values include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\". :type timeframe: str",
"self, *, and_property: Optional[List[\"ReportConfigFilter\"]] = None, or_property: Optional[List[\"ReportConfigFilter\"]] = None,",
"= recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range for data in",
"ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by expression to be used in the",
":param modification_time: dateTime in which alert was last modified. :type",
"~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for pulling data",
"\"Resolved\", \"Dismissed\". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime",
"= None, or_property: Optional[List[\"QueryFilter\"]] = None, not_property: Optional[\"QueryFilter\"] = None,",
"views. Variables are only populated by the server, and will",
"alias for the aggregated column. forecast can have up to",
"for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison",
"or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name of the column",
"self.time_period = time_period self.dataset = dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost",
"(budget). :type id: str :param enabled: show the KPI in",
"or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field to show in view.",
"start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end:",
"latest version or not. :type e_tag: str :param format: The",
"contact_groups: list of action groups to broadcast to. :type contact_groups:",
"= None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] = None,",
":type submitted_time: ~datetime.datetime :param processing_start_time: The time when export was",
"Analysis. Variables are only populated by the server, and will",
"} _attribute_map = { 'value': {'key': 'value', 'type': '[View]'}, 'next_link':",
"None self.e_tag = e_tag class Export(ProxyResource): \"\"\"An export resource. Variables",
"self.dataset = dataset class QueryFilter(msrest.serialization.Model): \"\"\"The filter expression to be",
"\"\"\" _validation = { 'type': {'required': True}, 'timeframe': {'required': True},",
"\"MetricType\"]] = None, kpis: Optional[List[\"KpiProperties\"]] = None, pivots: Optional[List[\"PivotProperties\"]] =",
"'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type':",
"future. If present, the end date must be greater than",
"in the report. :param columns: Array of column names to",
"'str'}, } def __init__( self, *, type: Optional[Union[str, \"AlertType\"]] =",
"include all available columns. :param columns: Array of column names",
"submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime]",
"'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod',",
"'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'},",
"in which alert was closed. :type close_time: str :param modification_time:",
"import msrest.serialization from ._cost_management_client_enums import * class Resource(msrest.serialization.Model): \"\"\"The Resource",
"results. Variables are only populated by the server, and will",
"operation: Operation type: Read, write, delete, etc. :vartype operation: str",
"= contact_emails self.contact_groups = contact_groups self.contact_roles = contact_roles self.overriding_alert =",
"'type': 'OperationDisplay'}, } def __init__( self, *, display: Optional[\"OperationDisplay\"] =",
"was last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name:",
"export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'id': {'readonly':",
"'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag',",
"expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\"",
"'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity':",
"_validation = { 'type': {'required': True}, 'name': {'required': True}, }",
"for the report. If custom, then a specific time period",
"{'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension':",
"Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str] =",
"status self.creation_time = creation_time self.close_time = close_time self.modification_time = modification_time",
"values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType",
":type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression for a",
"it is 'System'. :type submitted_by: str :param submitted_time: The time",
"REST API operation. Variables are only populated by the server,",
"the Microsoft.CostManagementExports resource provider. This is required once per subscription.",
"Optional[List[\"QueryGrouping\"]] = None, filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(QueryDataset,",
"a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation = { 'and_property':",
"valid query column name is allowed. If not provided, then",
"'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, } def",
"use in comparison. :type name: str :param operator: Required. The",
"\"Daily\", \"Weekly\", \"Monthly\", \"Annually\". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param",
"{ 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type':",
"'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'},",
"months. All required parameters must be populated in order to",
"'str'}, } def __init__( self, *, type: Optional[Union[str, \"PivotType\"]] =",
"alert. Possible values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\",",
"0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'},",
"based on dates. Possible values include: \"Usage\". :type type: str",
"{'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self, *, display:",
"UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3 sub-views",
"ExportProperties(CommonExportProperties): \"\"\"The properties of the export. Variables are only populated",
"the dictionary is the alias for the aggregated column. Report",
"_attribute_map = { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key':",
"of a report config. All required parameters must be populated",
"**kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name = name self.type = type",
"MIT License. See License.txt in the project root for license",
"any. :vartype next_link: str \"\"\" _validation = { 'value': {'readonly':",
"do not yet provide data for charges or amortization for",
"'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type':",
"Optional[\"CommonExportProperties\"] = None, error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ExportExecution,",
"~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items': 0},",
"date to pull data to. :type to: ~datetime.datetime \"\"\" _validation",
"'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map = {",
":type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation = { 'grouping': {'max_items': 2,",
"'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, } def",
"Required. Has delivery information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo",
"\"Dismissed\". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in",
"Azure. :param destination: Required. Has destination for the export being",
"None, id: Optional[str] = None, enabled: Optional[bool] = None, **kwargs",
"aggregation function to use. Possible values include: \"Sum\". :type function:",
"filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity =",
"= None, error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ExportExecution, self).__init__(**kwargs)",
"self, *, display: Optional[\"OperationDisplay\"] = None, **kwargs ): super(Operation, self).__init__(**kwargs)",
"= None, pivots: Optional[List[\"PivotProperties\"]] = None, type_properties_query_type: Optional[Union[str, \"ReportType\"]] =",
"Optional[Union[str, \"AlertOperator\"]] = None, amount: Optional[float] = None, unit: Optional[str]",
"2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array",
"processing_start_time self.processing_end_time = processing_end_time self.file_name = file_name self.run_settings = run_settings",
"page of results. :vartype next_link: str \"\"\" _validation = {",
"self, *, type: Union[str, \"QueryColumnType\"], name: str, **kwargs ): super(QueryGrouping,",
":type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name",
"{'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation':",
"can be differentiated based on dates. Possible values include: \"Usage\".",
"{'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost':",
"} def __init__( self, *, type: Union[str, \"ReportType\"], timeframe: Union[str,",
"'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'},",
"tags: dict[str, str] :param next_link: The link (url) to the",
"= include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must",
"'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type':",
":type enabled: bool \"\"\" _attribute_map = { 'type': {'key': 'type',",
"All rights reserved. # Licensed under the MIT License. See",
"subscription lowest possible grain. :type name: str \"\"\" _validation =",
"will be ignored when sending a request. All required parameters",
"{'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, }",
"be populated in order to send to Azure. :param destination:",
"Microsoft.CostManagementExports resource provider. This is required once per subscription. When",
"} def __init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value =",
"'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'},",
"\"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The",
"self.timeframe = timeframe self.time_period = time_period self.data_set = data_set class",
"None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs",
"'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key':",
"notification threshold percentage as a decimal which activated this alert.",
"grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of",
"selected for the export. If not provided then the export",
":type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an",
"per subscription. When creating an export in the Azure portal,",
"time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model): \"\"\"The filter expression to",
"'category': {'key': 'category', 'type': 'str'}, 'criteria': {'key': 'criteria', 'type': 'str'},",
"which the operation is performed: Dimensions, Query. :vartype resource: str",
"*, name: str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None, **kwargs ):",
"Report can have up to 2 aggregation clauses. :type aggregation:",
"Code Generator. # Changes may cause incorrect behavior and will",
"file_name: str :param run_settings: The export settings that were in",
"self).__init__(**kwargs) self.type = type self.name = name class QueryResult(Resource): \"\"\"Result",
"to send to Azure. :param from_property: Required. The start date",
"= error class ProxyResource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are",
"in the export. :param and_property: The logical \"AND\" expression. Must",
"{'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property':",
"tag class QueryGrouping(msrest.serialization.Model): \"\"\"The group by expression to be used",
"dateTime in which alert was created. :type creation_time: str :param",
"direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name of",
"def __init__( self, *, e_tag: Optional[str] = None, **kwargs ):",
"exports in the scope provided. Variables are only populated by",
"True}, 'operation': {'readonly': True}, } _attribute_map = { 'provider': {'key':",
"the export. Variables are only populated by the server, and",
"execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information",
"of operations and a URL link to get the next",
"super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link = None class CommonExportProperties(msrest.serialization.Model):",
"ErrorDetails(msrest.serialization.Model): \"\"\"The details of the error. Variables are only populated",
"= from_property self.to = to class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated",
"__init__( self, *, resource_id: str, container: str, root_folder_path: Optional[str] =",
"ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated with the export. All required parameters",
"grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The",
"to use in the report. The key of each item",
"end date for recurrence schedule. All required parameters must be",
"by expression to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting]",
"self.type = None self.e_tag = e_tag class Export(ProxyResource): \"\"\"An export",
"str :param enabled: show the KPI in the UI?. :type",
"\"AlertOperator\"]] = None, amount: Optional[float] = None, unit: Optional[str] =",
"end date for pulling data for the report. All required",
"None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload to update an alert.",
"str :ivar operation: Operation type: Read, write, delete, etc. :vartype",
"tags: dict[str, str] :param definition: defines the type of alert.",
"'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def",
"'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type':",
"def __init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration:",
":type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs",
"dimension: Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression",
"'name', 'type': 'str'}, } def __init__( self, *, name: str,",
"\"\"\" _attribute_map = { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description':",
"paused. Possible values include: \"Active\", \"Inactive\". :type status: str or",
"operations supported by the Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation]",
":param pivots: Configuration of 3 sub-views in the Cost Analysis",
"\"\"\" _validation = { 'delivery_info': {'required': True}, 'definition': {'required': True},",
"for the aggregated column. Report can have up to 2",
":type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining if actualCost",
"date of recurrence. :type from_property: ~datetime.datetime :param to: The end",
"The time when the export execution finished. :type processing_end_time: ~datetime.datetime",
"URL to get the next set of alerts results if",
"'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key':",
"not able to process the incoming request. The reason is",
"'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots',",
"and will be ignored when sending a request. :ivar provider:",
"self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type of alert.",
"threshold amount. :type amount: float :param unit: unit of currency",
"None, submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time:",
":param granularity: The granularity of rows in the report. Possible",
"'type': 'str'}, } def __init__( self, *, resource_id: str, container:",
"\"LessThan\", \"LessThanOrEqualTo\". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget",
"= submitted_time self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time self.file_name =",
"export execution. Variables are only populated by the server, and",
"the query. If custom, then a specific time period must",
"= None self.usage_end = None self.next_link = None class DimensionsListResult(msrest.serialization.Model):",
"str or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The last known status of",
"= None, columns: Optional[List[\"QueryColumn\"]] = None, rows: Optional[List[List[object]]] = None,",
"{'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter':",
"{'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, }",
"of the column to group. :type name: str \"\"\" _validation",
"{'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly': True}, } _attribute_map",
"latest version or not. :type e_tag: str \"\"\" _validation =",
"str] :param next_link: The link (url) to the next page",
"be uploaded. :type container: str :param root_folder_path: The name of",
"dataset: Optional[\"QueryDataset\"] = None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type =",
"sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs ):",
"} def __init__( self, *, type: Optional[Union[str, \"PivotType\"]] = None,",
"self.grouping_enabled = None self.data = data self.total = None self.category",
"the forecast. If custom, then a specific time period must",
":type from_property: ~datetime.datetime :param to: Required. The end date for",
"be greater than start date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation",
"not_property self.dimension = dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The",
"Optional[\"ExportDefinition\"] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] =",
"'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'next_link': {'key':",
"None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns =",
"str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule recurrence. Possible",
"name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by expression to be used",
"'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'},",
"operator: Required. The operator to use for comparison. Possible values",
"Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset:",
"= None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, \"AlertStatus\"]] =",
"None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe =",
"'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"ReportConfigFilter\"]]",
"format: Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule:",
"include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must contain a 'type' and",
"list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of the report. Usage represents",
"): super(ProxyResource, self).__init__(**kwargs) self.id = None self.name = None self.type",
"data. :type from_property: ~datetime.datetime :param to: Required. The end date",
"next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for the export.",
":vartype id: str :ivar name: Resource name. :vartype name: str",
"{ 'name': {'readonly': True}, } _attribute_map = { 'name': {'key':",
"key. :param type: KPI type (Forecast, Budget). Possible values include:",
"that were in effect for this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties",
"expression to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param",
"str :param function: Required. The name of the aggregation function",
"of data present in the query. :param granularity: The granularity",
"function: Required. The name of the aggregation function to use.",
":vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule information for the",
":param name: Required. The name of the column to group.",
"submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime]",
"None, filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity",
"time_period self.dataset = dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost",
"'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def __init__( self,",
"category. :vartype category: str :ivar usage_start: Usage start. :vartype usage_start:",
"**kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe",
"} _attribute_map = { 'value': {'key': 'value', 'type': '[Alert]'}, 'next_link':",
"= None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs)",
"items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical \"NOT\" expression.",
"self.name = name self.operator = operator self.values = values class",
"QueryResult(Resource): \"\"\"Result of query. It contains all columns listed under",
"Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of",
"of periodStartDate. :type period_start_date: str :param triggered_by: notificationId that triggered",
"'type': 'ExportDeliveryDestination'}, } def __init__( self, *, destination: \"ExportDeliveryDestination\", **kwargs",
"'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = {",
"supported by the Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar",
"*, from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod,",
"None self.filter_enabled = None self.grouping_enabled = None self.data = data",
"'type': 'ExportSchedule'}, } def __init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition:",
"format of the export being delivered. Currently only 'Csv' is",
"in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation = {",
"str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for",
":type processing_end_time: ~datetime.datetime :param file_name: The name of the exported",
"last modified this view. :vartype modified_on: ~datetime.datetime :param chart: Chart",
"clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression",
"provider: Microsoft.CostManagement. :vartype provider: str :ivar resource: Resource on which",
":type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map = { 'granularity': {'key': 'granularity',",
"~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression to use",
"{ 'destination': {'required': True}, } _attribute_map = { 'destination': {'key':",
"'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet',",
"self).__init__(**kwargs) self.code = None self.message = None class ErrorResponse(msrest.serialization.Model): \"\"\"Error",
"column. forecast can have up to 2 aggregation clauses. :type",
"str :param container: Required. The name of the container where",
"self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties):",
"Array of values to use for comparison. :type values: list[str]",
"'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'},",
"view in Cost Analysis. Required. Possible values include: \"Area\", \"Line\",",
"self.next_link = None class CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties of the",
"~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation = { 'type': {'required': True}, 'timeframe': {'required':",
"items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical \"OR\" expression.",
"None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model):",
"were in effect for this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param",
"'{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type':",
"contains a list of available views. Variables are only populated",
"\"\"\" _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name':",
"groups to broadcast to. :type contact_groups: list[str] :param contact_roles: list",
"= None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs)",
"be used in the report. All required parameters must be",
"\"\"\" _validation = { 'recurrence': {'required': True}, } _attribute_map =",
"ExportExecution(Resource): \"\"\"An export execution. Variables are only populated by the",
"Has filter expression to use in the forecast. :type filter:",
"= None class ExportProperties(CommonExportProperties): \"\"\"The properties of the export. Variables",
"to exports that do not yet provide data for charges",
":type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time:",
"_attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key':",
"class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression to be used in the",
"contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str]",
"from_property self.to = to class ForecastDataset(msrest.serialization.Model): \"\"\"The definition of data",
"'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type':",
"dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression to be used in",
"Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property",
"None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule",
"'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'},",
"data and UsageAndForecast represents both usage and forecasted data. Actual",
"self.chart = chart self.accumulated = accumulated self.metric = metric self.kpis",
"type self.name = name class QueryResult(Resource): \"\"\"Result of query. It",
"'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: Optional[datetime.datetime]",
"{'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property:",
"'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end':",
"forecast. All required parameters must be populated in order to",
":param scope: Cost Management scope to save the view on.",
"'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'},",
"\"\"\"Alert details. :param time_grain_type: Type of timegrain cadence. Possible values",
"if actualCost will be included. :type include_actual_cost: bool :param include_fresh_partial_cost:",
"present in the forecast. :param granularity: The granularity of rows",
"'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name': {'key':",
"self.grouping = grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration",
"{'readonly': True}, } _attribute_map = { 'provider': {'key': 'provider', 'type':",
"operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount. :type",
":param execution_type: The type of the export execution. Possible values",
"sending a request. :ivar value: List of alerts. :vartype value:",
"None, unit: Optional[str] = None, current_spend: Optional[float] = None, contact_emails:",
"the end date must be greater than start date. :type",
"None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None, grouping: Optional[List[\"ReportConfigGrouping\"]] = None,",
"\"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\",",
"{ 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type':",
"information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has",
"{ 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'}, } def __init__( self,",
"The identifier for the entity that executed the export. For",
"'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn',",
"ignored when sending a request. :ivar name: Operation name: {provider}/{resource}/{operation}.",
"{'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time':",
"Possible values include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param",
"{ 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map =",
"operator used to compare currentSpend with amount. Possible values include:",
"'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def",
"None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing dimensions. It contains a",
"None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status",
"of alert. Possible values include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\", \"General\",",
"Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to",
"values include: \"true\", \"false\". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param",
"from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property =",
"creation_time: str :param close_time: dateTime in which alert was closed.",
"format: Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, **kwargs",
"{'key': 'eTag', 'type': 'str'}, } def __init__( self, *, e_tag:",
"'[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type':",
":param enabled: show the KPI in the UI?. :type enabled:",
"Optional[\"QueryFilter\"] = None, dimension: Optional[\"QueryComparisonExpression\"] = None, tag: Optional[\"QueryComparisonExpression\"] =",
"'[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type':",
":type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The last known",
"self.time_period = time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model): \"\"\"The filter",
"contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]]",
"for pulling data for the query. If custom, then a",
"this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation = { 'type':",
"description: str :param source: Source of alert. Possible values include:",
"'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type':",
"Required. The name of the column to use in comparison.",
"only populated by the server, and will be ignored when",
"'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'},",
"the aggregated column. Report can have up to 2 aggregation",
"{'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert':",
"the storage account where exports will be delivered. :type resource_id:",
"'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'},",
"'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key':",
"the column to group. This version supports subscription lowest possible",
"self, *, definition: Optional[\"AlertPropertiesDefinition\"] = None, description: Optional[str] = None,",
"None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilter\"] = None, **kwargs",
"include: \"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required.",
"\"KpiType\"]] = None, id: Optional[str] = None, enabled: Optional[bool] =",
"\"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type status: str or",
"creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name",
"'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, } def __init__( self, *,",
"~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible values include: \"Cost\", \"Usage\",",
"Resource tags. :vartype tags: dict[str, str] :ivar description: Dimension description.",
"{'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time':",
"error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map = { 'error': {'key': 'error', 'type':",
"str :param close_time: dateTime in which alert was closed. :type",
"'str'}, } def __init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value",
"str] :ivar description: Dimension description. :vartype description: str :ivar filter_enabled:",
"{ 'resource_id': {'required': True}, 'container': {'required': True}, } _attribute_map =",
"request. :ivar value: The list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export]",
"alert. Possible values include: \"Preset\", \"User\". :type source: str or",
"a list of operations and a URL link to get",
"self.not_property = not_property self.dimension = dimension self.tag = tag class",
"for the time specified in the \"Retry-After\" header. :param error:",
"} _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'timeframe':",
"filter expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter",
"= unit self.current_spend = current_spend self.contact_emails = contact_emails self.contact_groups =",
"and 'name'. :param type: Data type to show in view.",
"ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information for the delivery of the export.",
"} def __init__( self, *, type: Union[str, \"ForecastType\"], timeframe: Union[str,",
"str \"\"\" _validation = { 'provider': {'readonly': True}, 'resource': {'readonly':",
"self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs)",
"e_tag: eTag of the resource. To handle concurrent update scenario,",
"from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property =",
"access to a storage account, you must register the account's",
"= name self.operator = operator self.values = values class ReportConfigDataset(msrest.serialization.Model):",
"self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None,",
"to get the next set of results. Variables are only",
"column names to be included in the report. Any valid",
":type name: str \"\"\" _attribute_map = { 'type': {'key': 'type',",
"_validation = { 'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation':",
"class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing dimensions. It contains a list",
"the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation = { 'name':",
"= time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model): \"\"\"The filter expression",
"= None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration",
"details of the error. Variables are only populated by the",
"only 'Daily' is supported. Possible values include: \"Daily\". :type granularity:",
"} def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id =",
"filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity =",
"Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param",
"'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def",
"configuration self.aggregation = aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The",
"'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'},",
"group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter",
"email. For scheduled executions it is 'System'. :type submitted_by: str",
"report config. All required parameters must be populated in order",
"'id', 'type': 'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, } def",
"a query. All required parameters must be populated in order",
"pulling data for the query. All required parameters must be",
"3 months. All required parameters must be populated in order",
"self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter self.meter_filter = meter_filter self.tag_filter",
"): super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link = None class",
"configuration information for the data in the report. The configuration",
"**kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe",
"~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items':",
"examples). :type columns: list[str] \"\"\" _attribute_map = { 'columns': {'key':",
"True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True},",
"None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None,",
"Azure. :param format: The format of the export being delivered.",
"available columns can vary by customer channel (see examples). :type",
"{'required': True}, } _attribute_map = { 'from_property': {'key': 'from', 'type':",
"Chart type of the main view in Cost Analysis. Required.",
"the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export",
"Required. The name of the column to aggregate. :type name:",
"filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items':",
"metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs to",
"Has time period for pulling data for the forecast. :type",
"for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the",
"'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__( self,",
"type: Optional[str] = None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name =",
"schedule class ExportDataset(msrest.serialization.Model): \"\"\"The definition for data in the export.",
"time_grain_type self.period_start_date = period_start_date self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter",
"date must be greater than start date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod",
":param tag_filter: tags to filter by. :type tag_filter: object :param",
"self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be",
"of the view. Required. :type display_name: str :param scope: Cost",
"_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key':",
"expression to use in the report. The key of each",
"data_set: Optional[\"ExportDataset\"] = None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type =",
"= None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property",
"Has delivery information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param",
"\"\"\" _validation = { 'destination': {'required': True}, } _attribute_map =",
"contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str] = None, **kwargs ):",
"operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation = { 'name': {'readonly':",
"'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__(",
"be used in the export. :param and_property: The logical \"AND\"",
"None, not_property: Optional[\"QueryFilter\"] = None, dimension: Optional[\"QueryComparisonExpression\"] = None, tag:",
"{'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, }",
"this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation = {",
":param dataset: Has definition for data in this report config.",
"The granularity of rows in the forecast. Possible values include:",
"{'key': 'includeFreshPartialCost', 'type': 'bool'}, } def __init__( self, *, type:",
"a request. :ivar code: Error code. :vartype code: str :ivar",
"__init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value = None self.next_link",
"e_tag: str :param display_name: User input name of the view.",
"definition: Optional[\"AlertPropertiesDefinition\"] = None, description: Optional[str] = None, source: Optional[Union[str,",
"'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def",
"name: str, operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(QueryComparisonExpression,",
"include: \"Dimension\", \"TagKey\". :type type: str or ~azure.mgmt.costmanagement.models.PivotType :param name:",
"data for charges or amortization for service reservations. Possible values",
"\"User\". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details.",
"self.category = category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details.",
"{'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings':",
"~datetime.datetime :ivar next_link: The link (url) to the next page",
"Possible values include: \"Usage\". :type type: str or ~azure.mgmt.costmanagement.models.ReportType :param",
"QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used in the query.",
"\"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"ForecastDataset\"]",
"is the alias for the aggregated column. Report can have",
"self, *, next_link: Optional[str] = None, columns: Optional[List[\"QueryColumn\"]] = None,",
"Possible values include: \"Preset\", \"User\". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource",
"= None class ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates that the service",
"'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'},",
"request payload to update an alert. :param definition: defines the",
"): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class",
"None, dataset: Optional[\"ReportConfigDataset\"] = None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs)",
"name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to",
"\"\"\"The configuration of dataset in the query. :param columns: Array",
"= None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilter\"] = None,",
"status_modification_time: str \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"Resource on which the operation is performed: Dimensions, Query. :vartype",
"with a export. All required parameters must be populated in",
"schedule is paused. Possible values include: \"Active\", \"Inactive\". :type status:",
"Possible values include: \"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\".",
":type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name",
"'type': 'str'}, 'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo',",
"have up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping]",
"defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description:",
"'type': 'str'}, } def __init__( self, *, e_tag: Optional[str] =",
"{'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping':",
"for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation = {",
"self.type = type self.timeframe = timeframe self.time_period = time_period self.data_set",
"'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not',",
"'name', 'type': 'str'}, } def __init__( self, *, type: Union[str,",
"'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'},",
"of column names to be included in the export. If",
"report. Report can have up to 2 group by clauses.",
"str :param triggered_by: notificationId that triggered this alert. :type triggered_by:",
"'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, } def __init__(",
"__init__( self, *, error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ErrorResponse,",
":type close_time: str :param modification_time: dateTime in which alert was",
"def __init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value = None",
":param type: type of alert. Possible values include: \"Budget\", \"Invoice\",",
"dict[str, str] \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"export settings that were in effect for this execution. :type",
"handle concurrent update scenario, this field will be used to",
"Optional[str] = None, enabled: Optional[bool] = None, **kwargs ): super(KpiProperties,",
":type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time",
"can have up to 2 aggregation clauses. :type aggregation: dict[str,",
":param filter: Has filter expression to use in the forecast.",
"\"\"\" _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2},",
"Required. The name of the container where exports will be",
"the query. Any valid query column name is allowed. If",
"TooManyRequests - Request is throttled. Retry after waiting for the",
":param not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param",
"'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime',",
"~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation = { 'name': {'required': True}, 'function': {'required':",
"sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset",
":ivar operation: Operation type: Read, write, delete, etc. :vartype operation:",
"the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'id':",
"The operator to use for comparison. Possible values include: \"In\",",
"type: Union[str, \"ReportConfigColumnType\"], name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type",
"super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model): \"\"\"The definition of",
"for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has",
"is not able to process the incoming request. The reason",
"differentiated based on dates. Possible values include: \"Usage\". :type type:",
"class ProxyResource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are only populated",
"config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation = { 'id': {'readonly':",
"time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"QueryDataset\"] = None, **kwargs ):",
"executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation = { 'value': {'readonly':",
"'id': {'key': 'id', 'type': 'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'},",
"super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id self.container = container self.root_folder_path =",
"'type': 'iso-8601'}, } def __init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition:",
"} def __init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]] = None,",
"It contains a list of available dimensions. Variables are only",
"*, type: Optional[Union[str, \"PivotType\"]] = None, name: Optional[str] = None,",
"for the aggregated column. Query can have up to 2",
"'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold',",
"= tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by expression to be",
"expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation =",
"\"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType",
"'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key':",
"'[[object]]'}, } def __init__( self, *, next_link: Optional[str] = None,",
"- Service is temporarily unavailable. Retry after waiting for the",
"include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has",
"'type': {'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled':",
":param run_history: If requested, has the most recent execution history",
"'{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type':",
"'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'},",
":param configuration: Has configuration information for the data in the",
"column. Query can have up to 2 aggregation clauses. :type",
"{'key': 'properties.rows', 'type': '[[object]]'}, } def __init__( self, *, next_link:",
"= None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class",
"'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping',",
"include: \"Ascending\", \"Descending\". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name:",
"{ 'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type':",
"= None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs)",
"from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs)",
"'System'. :type submitted_by: str :param submitted_time: The time when export",
"Data type to show in view. Possible values include: \"Dimension\",",
"supported. Possible values include: \"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType",
"{'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total':",
"} def __init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value =",
"expression to be used in the export. :param and_property: The",
"KPI type (Forecast, Budget). Possible values include: \"Forecast\", \"Budget\". :type",
"Type of timegrain cadence. Possible values include: \"None\", \"Monthly\", \"Quarterly\",",
"_validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type':",
"def __init__( self, *, recurrence: Union[str, \"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]]",
"coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights",
"= { 'destination': {'required': True}, } _attribute_map = { 'destination':",
"{'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *, type:",
":param contact_roles: list of contact roles. :type contact_roles: list[str] :param",
"columns to be selected for the export. If not provided",
"\"\"\"Result of listing exports. It contains a list of available",
"include_fresh_partial_cost: a boolean determining if FreshPartialCost will be included. :type",
"group by expression to use in the query. Query can",
"pulling data for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set:",
"columns. :param columns: Array of column names to be included",
"total: Total number of data for the dimension. :vartype total:",
"type: str :ivar tags: A set of tags. Resource tags.",
"of the column to group. This version supports subscription lowest",
":param format: The format of the export being delivered. Currently",
"to Azure. :param type: Required. The type of the forecast.",
":type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation = { 'grouping': {'max_items': 2,",
"= filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the",
"'type': 'ExportRecurrencePeriod'}, } def __init__( self, *, recurrence: Union[str, \"RecurrenceType\"],",
"Optional[str] = None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition = definition",
"True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Alert]'},",
"\"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param",
"\"ReportGranularityType\"]] = None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]]",
"under the MIT License. See License.txt in the project root",
"to be used in the report. All required parameters must",
":param root_folder_path: The name of the directory where exports will",
"The granularity of rows in the query. Possible values include:",
"pulling data for the export. If custom, then a specific",
"list[object] :param meter_filter: array of meters to filter by. :type",
"_attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key':",
"export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation = { 'type': {'required':",
"of meters to filter by. :type meter_filter: list[object] :param tag_filter:",
"True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly': True},",
"'OperationDisplay'}, } def __init__( self, *, display: Optional[\"OperationDisplay\"] = None,",
"and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical \"OR\" expression. Must have",
"): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property",
"= { 'name': {'key': 'name', 'type': 'str'}, 'function': {'key': 'function',",
"\"\"\"The definition of data present in the query. :param granularity:",
"values include: \"Ascending\", \"Descending\". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param",
"Azure. :param type: Required. Has type of the column to",
"populated in order to send to Azure. :param name: Required.",
"\"\"\" _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration':",
"type: Required. The type of the export. Note that 'Usage'",
"configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset configuration. Allows columns to",
":type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3 sub-views in",
"\"\"\" _validation = { 'resource_id': {'required': True}, 'container': {'required': True},",
"possible grain. :type name: str \"\"\" _validation = { 'type':",
":vartype tags: dict[str, str] \"\"\" _validation = { 'id': {'readonly':",
"self.dimension = dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter",
"None, columns: Optional[List[\"QueryColumn\"]] = None, rows: Optional[List[List[object]]] = None, **kwargs",
"boolean determining if FreshPartialCost will be included. :type include_fresh_partial_cost: bool",
"Required. :type display_name: str :param scope: Cost Management scope to",
"schedule. All required parameters must be populated in order to",
"can have up to 2 group by clauses. :type grouping:",
"= None, category: Optional[Union[str, \"AlertCategory\"]] = None, criteria: Optional[Union[str, \"AlertCriteria\"]]",
"\"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus",
"start and end date of the recurrence. The start date",
"is allowed. If not provided, then report includes all columns.",
"'[Export]'}, } def __init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value",
"~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule recurrence. Possible values include:",
"this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details of",
"up to 2 group by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param",
"= format self.delivery_info = delivery_info self.definition = definition self.run_history =",
"self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value = None self.next_link =",
"which the alert status was last modified. :type status_modification_time: str",
"type: Required. The type of the query. Possible values include:",
"Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization",
"{'key': 'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by':",
"to use in the query. The key of each item",
"details. :param time_grain_type: Type of timegrain cadence. Possible values include:",
"'[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type':",
"class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for pulling data",
":ivar type: Resource type. :vartype type: str :ivar tags: A",
"class ExportExecution(Resource): \"\"\"An export execution. Variables are only populated by",
"'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key':",
"Has schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\"",
"of the recurrence. The start date must be in future.",
"Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property",
"~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this query.",
"self.status = status self.creation_time = creation_time self.close_time = close_time self.modification_time",
"grouping: Array of group by expression to use in the",
"register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All",
"'value': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value',",
"send to Azure. :param destination: Required. Has destination for the",
"\"StatusType\"]] = None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None, **kwargs ): super(ExportSchedule,",
"None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description =",
"def __init__( self, *, type: Optional[Union[str, \"PivotType\"]] = None, name:",
"least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical",
"'str'}, } def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value",
"a decimal which activated this alert. :type threshold: float :param",
"of the query. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type",
"'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'},",
"format: The format of the export being delivered. Currently only",
"= None class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must contain a 'type'",
"None, type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]] =",
"~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map = { 'type': {'key': 'type', 'type': 'str'},",
"'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type':",
"\"\"\"Each KPI must contain a 'type' and 'enabled' key. :param",
"in the forecast. The key of each item in the",
":param filter: Has filter expression to use in the query.",
"= None, tag: Optional[\"QueryComparisonExpression\"] = None, **kwargs ): super(QueryFilter, self).__init__(**kwargs)",
"'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod',",
":type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier for",
"self).__init__(**kwargs) self.from_property = from_property self.to = to class ForecastDataset(msrest.serialization.Model): \"\"\"The",
"root_folder_path: str \"\"\" _validation = { 'resource_id': {'required': True}, 'container':",
"'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type':",
"Optional[List[str]] = None, overriding_alert: Optional[str] = None, **kwargs ): super(AlertPropertiesDetails,",
"str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in",
"'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, } def __init__( self,",
"on dates. Possible values include: \"Usage\". :type type_properties_query_type: str or",
":type root_folder_path: str \"\"\" _validation = { 'resource_id': {'required': True},",
"ProxyResource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are only populated by",
"= dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression to be used",
"__init__( self, *, e_tag: Optional[str] = None, **kwargs ): super(ProxyResource,",
"aggregation. Variables are only populated by the server, and will",
"class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the report. :param",
"aggregated column. Query can have up to 2 aggregation clauses.",
"display_name self.scope = scope self.created_on = None self.modified_on = None",
"Possible values include: \"None\", \"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\".",
"'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__(",
"self, *, type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"]",
"ignored when sending a request. :ivar value: List of alerts.",
"_attribute_map = { 'value': {'key': 'value', 'type': '[View]'}, 'next_link': {'key':",
"for charges or amortization for service reservations. Possible values include:",
"= operator self.values = values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of",
"resource related to metric (budget). :type id: str :param enabled:",
"'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity', 'type':",
"'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def __init__( self,",
"Operation type: Read, write, delete, etc. :vartype operation: str \"\"\"",
"then the export will include all available columns. :param columns:",
"have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property:",
"'description': {'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'},",
":vartype description: str :ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool",
"'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for",
"def __init__( self, *, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None, period_start_date:",
"latest version or not. :type e_tag: str :param display_name: User",
"report includes all columns. :type columns: list[str] \"\"\" _attribute_map =",
"indicates that the service is not able to process the",
"'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn', 'type': 'iso-8601'},",
"uploaded. :type root_folder_path: str \"\"\" _validation = { 'resource_id': {'required':",
"Possible values include: \"Daily\", \"Weekly\", \"Monthly\", \"Annually\". :type recurrence: str",
"'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'},",
"in which the alert status was last modified. :type status_modification_time:",
"or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time frame for pulling",
"Optional[float] = None, operator: Optional[Union[str, \"AlertOperator\"]] = None, amount: Optional[float]",
"= source self.details = details self.cost_entity_id = cost_entity_id self.status =",
"is the alias for the aggregated column. Query can have",
"time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information for",
"name: str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name",
"{'key': 'type', 'type': 'str'}, } def __init__( self, *, name:",
"= None, run_settings: Optional[\"CommonExportProperties\"] = None, error: Optional[\"ErrorDetails\"] = None,",
"str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the",
"delivery information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition:",
"schedule recurrence. Possible values include: \"Daily\", \"Weekly\", \"Monthly\", \"Annually\". :type",
"'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type':",
"If requested, has the most recent execution history for the",
"The key of each item in the dictionary is the",
"True}, 'container': {'required': True}, } _attribute_map = { 'resource_id': {'key':",
"for the data in the report. The configuration will be",
"the query. :param granularity: The granularity of rows in the",
"tags. :vartype tags: dict[str, str] :param next_link: The link (url)",
"contains all columns listed under groupings and aggregation. Variables are",
"ExportDataset(msrest.serialization.Model): \"\"\"The definition for data in the export. :param granularity:",
"'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *,",
"'type': {'key': 'type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'},",
"'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self,",
"to Azure. :param status: The status of the export's schedule.",
"'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type':",
"status_modification_user_name: str :param status_modification_time: dateTime in which the alert status",
"'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'},",
"the user last modified this view. :vartype modified_on: ~datetime.datetime :param",
"'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__( self, *, type: Union[str, \"ReportType\"],",
"None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date =",
"include: \"true\", \"false\". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric:",
"data present in the forecast. :param granularity: The granularity of",
"expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter]",
"or not. :type e_tag: str :param format: The format of",
"Optional[Union[str, \"ReportType\"]] = None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None, time_period:",
"} def __init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value =",
"'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type':",
"end date to pull data to. :type to: ~datetime.datetime \"\"\"",
"'type': {'required': True}, 'name': {'required': True}, } _attribute_map = {",
"run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active",
"{ 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True},",
"self.time_grain_type = time_grain_type self.period_start_date = period_start_date self.triggered_by = triggered_by self.resource_group_filter",
"export dataset configuration. Allows columns to be selected for the",
"The start date to pull data from. :type from_property: ~datetime.datetime",
"str :param e_tag: eTag of the resource. To handle concurrent",
"available views. Variables are only populated by the server, and",
"of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map = {",
"error. Variables are only populated by the server, and will",
"__init__( self, *, execution_type: Optional[Union[str, \"ExecutionType\"]] = None, status: Optional[Union[str,",
":type values: list[str] \"\"\" _validation = { 'name': {'required': True},",
"self.period_start_date = period_start_date self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter self.resource_filter",
"next_run_time_estimate: If the export has an active schedule, provides an",
"the report. If custom, then a specific time period must",
"Optional[Union[str, \"ReportTimeframeType\"]] = None, time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDataset\"]",
"resource id of the storage account where exports will be",
"filter: Has filter expression to use in the query. :type",
"{'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set':",
"*, and_property: Optional[List[\"QueryFilter\"]] = None, or_property: Optional[List[\"QueryFilter\"]] = None, not_property:",
"run_history self.next_run_time_estimate = None class Dimension(Resource): \"\"\"Dimension. Variables are only",
"'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled',",
"= None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None, grouping: Optional[List[\"ReportConfigGrouping\"]] =",
"= container self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information",
"filter by. :type tag_filter: object :param threshold: notification threshold percentage",
"aggregation expression to use in the query. The key of",
"'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'},",
"type of alert. Possible values include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\",",
"amount: float :param unit: unit of currency being used. :type",
"an export in the Azure portal, it is done automatically,",
"'properties.error', 'type': 'ErrorDetails'}, } def __init__( self, *, execution_type: Optional[Union[str,",
"or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map = { 'type': {'key': 'type', 'type':",
":param period_start_date: datetime of periodStartDate. :type period_start_date: str :param triggered_by:",
"contact_groups self.contact_roles = contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result",
"= from_property self.to = to class View(ProxyResource): \"\"\"States and configurations",
"'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'},",
"~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use in",
"chart self.accumulated = accumulated self.metric = metric self.kpis = kpis",
":param operator: Required. The operator to use for comparison. Possible",
"Azure. :param name: Required. The name of the column to",
"= { 'status': {'key': 'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence',",
"names to be included in the export. If not provided",
"operation: str \"\"\" _validation = { 'provider': {'readonly': True}, 'resource':",
"Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3",
"triggered alert. Possible values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\",",
"{'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ):",
"\"\"\"The definition of an export. All required parameters must be",
"associated with the export. All required parameters must be populated",
"scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope,",
"not_property: Optional[\"QueryFilter\"] = None, dimension: Optional[\"QueryComparisonExpression\"] = None, tag: Optional[\"QueryComparisonExpression\"]",
"start date to pull data from. :type from_property: ~datetime.datetime :param",
"'e_tag': {'key': 'eTag', 'type': 'str'}, 'format': {'key': 'properties.format', 'type': 'str'},",
"next_link self.columns = columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The",
"in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean",
"{'required': True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items': 1},",
"\"Usage\". :type type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The",
"\"\"\" _validation = { 'from_property': {'required': True}, 'to': {'required': True},",
"'str'}, } def __init__( self, *, name: str, function: Union[str,",
"export in the Azure portal, it is done automatically, however",
"\"Cost\", \"Usage\", \"Billing\", \"System\". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param",
"'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key':",
"rows: list[list[object]] \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
":type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name",
"report. If custom, then a specific time period must be",
"the MIT License. See License.txt in the project root for",
"self, *, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None, period_start_date: Optional[str] =",
"export will include all available columns. :param columns: Array of",
"to contact. :type contact_emails: list[str] :param contact_groups: list of action",
"'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"ReportConfigFilter\"]] =",
"= None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition = definition self.description",
"None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model):",
"= definition self.description = description self.source = source self.details =",
"'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'},",
"{'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, }",
"to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping:",
"'type': 'str'}, } def __init__( self, *, data: Optional[List[str]] =",
"end. :vartype usage_end: ~datetime.datetime :ivar next_link: The link (url) to",
"dataset in the query. :param columns: Array of column names",
"= None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None, time_period: Optional[\"ReportConfigTimePeriod\"] =",
"'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def",
"export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition",
"'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__( self, *, recurrence: Union[str,",
"**kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to",
"'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition',",
"datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property",
"self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI",
"): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule = schedule",
":type status_modification_time: str \"\"\" _validation = { 'id': {'readonly': True},",
"least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical",
"columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows. :type",
":vartype provider: str :ivar resource: Resource on which the operation",
"include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType",
"or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated over time. Possible",
"dimension. :vartype total: int :ivar category: Dimension category. :vartype category:",
"\"Weekly\", \"Monthly\", \"Annually\". :type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period:",
"None, accumulated: Optional[Union[str, \"AccumulatedType\"]] = None, metric: Optional[Union[str, \"MetricType\"]] =",
"Direction of sort. Possible values include: \"Ascending\", \"Descending\". :type direction:",
"): super(PivotProperties, self).__init__(**kwargs) self.type = type self.name = name class",
"'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key':",
"timegrain cadence. Possible values include: \"None\", \"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\",",
"resource_id: str, container: str, root_folder_path: Optional[str] = None, **kwargs ):",
"to Azure. :param from_property: Required. The start date for export",
"source: Optional[Union[str, \"AlertSource\"]] = None, details: Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id:",
"contain a 'type' and 'enabled' key. :param type: KPI type",
"the export's schedule is paused. Possible values include: \"Active\", \"Inactive\".",
"= time_grain_type self.period_start_date = period_start_date self.triggered_by = triggered_by self.resource_group_filter =",
"'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod',",
"delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the definition for the",
"self.created_on = None self.modified_on = None self.chart = chart self.accumulated",
"export execution finished. :type processing_end_time: ~datetime.datetime :param file_name: The name",
"None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence =",
"self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter self.meter_filter",
"\"Retry-After\" header. :param error: The details of the error. :type",
":type resource_group_filter: list[object] :param resource_filter: array of resources to filter",
"str :ivar type: Resource type. :vartype type: str :param e_tag:",
"'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, } def __init__(",
"'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__(",
"ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for pulling data for",
"views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url) to",
"yet provide data for charges or amortization for service reservations.",
"\"LessThanOrEqualTo\". :type operator: str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold",
"view. Possible values include: \"Dimension\", \"TagKey\". :type type: str or",
"status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert",
"\"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria: str",
"direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction",
"= { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message',",
"description: Dimension description. :vartype description: str :ivar filter_enabled: Filter enabled.",
"next_link: str \"\"\" _validation = { 'value': {'readonly': True}, 'next_link':",
"If custom, then a specific time period must be provided.",
"self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link =",
"= configuration self.aggregation = aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model):",
"__init__( self, *, e_tag: Optional[str] = None, format: Optional[Union[str, \"FormatType\"]]",
"*, definition: Optional[\"AlertPropertiesDefinition\"] = None, description: Optional[str] = None, source:",
"names to be included in the report. Any valid report",
"{ 'from_property': {'required': True}, } _attribute_map = { 'from_property': {'key':",
"update scenario, this field will be used to determine whether",
"The start date of recurrence. :type from_property: ~datetime.datetime :param to:",
"whether the user is updating the latest version or not.",
"'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type':",
"submitted_time: The time when export was queued to be executed.",
"start date of recurrence. :type from_property: ~datetime.datetime :param to: The",
"True}, 'to': {'required': True}, } _attribute_map = { 'from_property': {'key':",
"Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] =",
"The last known status of the export execution. Possible values",
"'threshold': {'key': 'threshold', 'type': 'float'}, 'operator': {'key': 'operator', 'type': 'str'},",
"'type', 'type': 'str'}, 'category': {'key': 'category', 'type': 'str'}, 'criteria': {'key':",
"not. :type e_tag: str :param display_name: User input name of",
"Optional[Union[str, \"ExecutionStatus\"]] = None, submitted_by: Optional[str] = None, submitted_time: Optional[datetime.datetime]",
"allowed. If not provided, then report includes all columns. :type",
"True}, 'type': {'readonly': True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly': True},",
":ivar type: Resource type. :vartype type: str :param e_tag: eTag",
"granularity of rows in the query. Possible values include: \"Daily\".",
"class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload to update an alert. :param",
"'[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self,",
"'name': {'required': True}, } _attribute_map = { 'type': {'key': 'type',",
"{'key': 'tag', 'type': 'QueryComparisonExpression'}, } def __init__( self, *, and_property:",
"usage and forecasted data can be differentiated based on dates.",
"\"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The",
"def __init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]] = None, configuration:",
"{ 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'container': {'key': 'container', 'type':",
"self.operator = operator self.values = values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition",
"__init__( self, *, type: Union[str, \"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"], time_period:",
"'str'}, } def __init__( self, *, type: Union[str, \"ReportConfigColumnType\"], name:",
"that represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation =",
"Source of alert. Possible values include: \"Preset\", \"User\". :type source:",
":type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression for a",
"header. * 503 ServiceUnavailable - Service is temporarily unavailable. Retry",
"for the delivery of the export. To allow access to",
"'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__( self, *,",
"~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'},",
"link (url) to the next page of results. :type next_link:",
"spend. :type current_spend: float :param contact_emails: list of emails to",
"the export. The configuration will be ignored if aggregation and",
":type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate.",
"_attribute_map = { 'value': {'key': 'value', 'type': '[Export]'}, } def",
"self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info = delivery_info self.definition =",
"frame for pulling data for the query. If custom, then",
"include: \"Preset\", \"User\". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details:",
"OperationDisplay(msrest.serialization.Model): \"\"\"The object that represents the operation. Variables are only",
"= dimension self.tag = tag class QueryGrouping(msrest.serialization.Model): \"\"\"The group by",
"\"\"\"The definition of a query. All required parameters must be",
"Possible values include: \"Cost\", \"Usage\", \"Billing\", \"System\". :type category: str",
"amortization for service reservations. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\".",
"str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to",
"self.name = None self.display = display class OperationDisplay(msrest.serialization.Model): \"\"\"The object",
"which alert was last modified. :type modification_time: str :param status_modification_user_name:",
"in the forecast. Possible values include: \"Daily\". :type granularity: str",
":param source: Source of alert. Possible values include: \"Preset\", \"User\".",
"only 'Csv' is supported. Possible values include: \"Csv\". :type format:",
"'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate':",
"column. Report can have up to 2 aggregation clauses. :type",
"Optional[float] = None, unit: Optional[str] = None, current_spend: Optional[float] =",
"~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link:",
"class ErrorDetails(msrest.serialization.Model): \"\"\"The details of the error. Variables are only",
"represents the operation. :type display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation = {",
"forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data",
"'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, } def __init__( self,",
":param unit: unit of currency being used. :type unit: str",
"{'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time':",
"of tags. Resource tags. :vartype tags: dict[str, str] :param next_link:",
"self).__init__(**kwargs) self.description = None self.filter_enabled = None self.grouping_enabled = None",
"The resource id of the storage account where exports will",
"{'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly':",
"operator self.amount = amount self.unit = unit self.current_spend = current_spend",
"): super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period",
"): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name self.function = function class",
"def __init__( self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] = None,",
"the directory where exports will be uploaded. :type root_folder_path: str",
"items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical \"NOT\" expression.",
"Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"QueryDataset\"] = None,",
"'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]]",
"Optional[List[\"KpiProperties\"]] = None, pivots: Optional[List[\"PivotProperties\"]] = None, type_properties_query_type: Optional[Union[str, \"ReportType\"]]",
"format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information",
"str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs to show",
"self.sorting = sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition",
"order to send to Azure. :param resource_id: Required. The resource",
"The time when export was picked up to be executed.",
"None, status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition",
"The export settings that were in effect for this execution.",
":param sorting: Array of order by expression to use in",
"column to sort. :type name: str \"\"\" _validation = {",
"related to metric (budget). :type id: str :param enabled: show",
"scope provided. Variables are only populated by the server, and",
"'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key':",
"= { 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink',",
"self.next_run_time_estimate = None class Dimension(Resource): \"\"\"Dimension. Variables are only populated",
":ivar message: Error message indicating why the operation failed. :vartype",
"\"BillingQuarter\", \"BillingAnnual\". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime",
"for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition",
"type of the export. Note that 'Usage' is equivalent to",
"'ExportDeliveryDestination'}, } def __init__( self, *, destination: \"ExportDeliveryDestination\", **kwargs ):",
"{'key': 'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns':",
"None, run_history: Optional[\"ExportExecutionListResult\"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format",
"Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param",
"if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param",
"\"\"\" _validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True},",
"ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration",
"'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key':",
"query. :param granularity: The granularity of rows in the query.",
"2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical \"NOT\"",
"file_name: The name of the exported file. :type file_name: str",
"available dimensions. Variables are only populated by the server, and",
"): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info self.definition",
"execution history of an export. Variables are only populated by",
"expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression for",
"= sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of",
"'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate',",
"Optional[Union[str, \"AlertTimeGrainType\"]] = None, period_start_date: Optional[str] = None, triggered_by: Optional[str]",
"'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'},",
"{'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe':",
"int :ivar category: Dimension category. :vartype category: str :ivar usage_start:",
"in order to send to Azure. :param status: The status",
"= { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description',",
"'[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, } def __init__( self,",
"'values': {'key': 'values', 'type': '[str]'}, } def __init__( self, *,",
"aggregation expression to use in the forecast. The key of",
"{'key': 'destination', 'type': 'ExportDeliveryDestination'}, } def __init__( self, *, destination:",
"error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing the execution history of",
"'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'},",
"self.to = to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be",
"of rows in the report. Possible values include: \"Daily\", \"Monthly\".",
"list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use in the",
"None, **kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope",
"None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id self.container =",
"= close_time self.modification_time = modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time =",
":type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression for a",
"to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation",
"or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling data",
"str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource related to",
"the query. Possible values include: \"Daily\". :type granularity: str or",
"self.filter_enabled = None self.grouping_enabled = None self.data = data self.total",
"of the storage account where exports will be delivered. :type",
"'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key':",
"operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name",
"data. :type to: ~datetime.datetime \"\"\" _validation = { 'from_property': {'required':",
"def __init__( self, *, type: Union[str, \"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"],",
"'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag',",
"'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'},",
"class OperationListResult(msrest.serialization.Model): \"\"\"Result of listing cost management operations. It contains",
"'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__(",
"'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter',",
"'enabled', 'type': 'bool'}, } def __init__( self, *, type: Optional[Union[str,",
"It contains a list of available exports in the scope",
"meter_filter: list[object] :param tag_filter: tags to filter by. :type tag_filter:",
":type type: str \"\"\" _attribute_map = { 'name': {'key': 'name',",
"of the main view in Cost Analysis. Required. Possible values",
"'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings',",
"and will be lost if the code is regenerated. #",
"self.dataset = dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result of listing views. It",
"\"AmortizedCost\", \"AHUB\". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List",
"common properties of the export. Variables are only populated by",
"super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model): \"\"\"Result of listing",
"{ 'status': {'key': 'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type':",
"and end date for recurrence schedule. All required parameters must",
"'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not',",
"will be ignored when sending a request. :ivar code: Error",
"'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for",
"'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs",
"scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}'",
"least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical",
"name of the exported file. :type file_name: str :param run_settings:",
"The name of column. :type name: str :param type: The",
"self.status = status self.submitted_by = submitted_by self.submitted_time = submitted_time self.processing_start_time",
":type name: str \"\"\" _validation = { 'type': {'required': True},",
"for data in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost:",
"'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'},",
"True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map =",
"None self.next_link = None class CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties of",
"= None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status",
"'type': 'ReportConfigFilter'}, } def __init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]]",
"self.description = description self.source = source self.details = details self.cost_entity_id",
"when sending a request. :ivar value: The list of exports.",
"{ 'value': {'key': 'value', 'type': '[ExportExecution]'}, } def __init__( self,",
"must be provided. Possible values include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\",",
"'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'},",
":type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery",
"and end date for pulling data for the query. All",
"is supported. Possible values include: \"Csv\". :type format: str or",
"in the report. The key of each item in the",
"self.values = values class QueryDataset(msrest.serialization.Model): \"\"\"The definition of data present",
":param rows: Array of rows. :type rows: list[list[object]] \"\"\" _validation",
"or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period for pulling data",
"\"\"\" _attribute_map = { 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date':",
"class View(ProxyResource): \"\"\"States and configurations of Cost Analysis. Variables are",
"or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time frame for pulling",
"def __init__( self, *, display: Optional[\"OperationDisplay\"] = None, **kwargs ):",
"~azure.mgmt.costmanagement.models.MetricType :param kpis: List of KPIs to show in Cost",
":type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical \"OR\" expression. Must",
"super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period =",
"export. Currently only 'Daily' is supported. Possible values include: \"Daily\".",
"source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details:",
"{'key': 'threshold', 'type': 'float'}, 'operator': {'key': 'operator', 'type': 'str'}, 'amount':",
"status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition =",
"have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property:",
"to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to",
"If the export has an active schedule, provides an estimate",
"self.name = name self.function = function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param",
"= None, tag_filter: Optional[object] = None, threshold: Optional[float] = None,",
":param timeframe: The time frame for pulling data for the",
"'created_on': {'readonly': True}, 'modified_on': {'readonly': True}, } _attribute_map = {",
"and will be ignored when sending a request. :ivar code:",
"**kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination class ExportExecution(Resource): \"\"\"An",
"column. :type name: str :param type: The type of column.",
"self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource =",
":param status: The status of the export's schedule. If 'Inactive',",
"array of resourceGroups to filter by. :type resource_group_filter: list[object] :param",
"= { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'container': {'key': 'container',",
"= { 'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly':",
"super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function = function class QueryColumn(msrest.serialization.Model):",
":type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to",
"'{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type':",
"# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All",
"DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload to update an alert. :param definition:",
"'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type':",
"str \"\"\" _validation = { 'code': {'readonly': True}, 'message': {'readonly':",
"The time frame for pulling data for the report. If",
":type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation = { 'type': {'required': True},",
"status: Optional[Union[str, \"ExecutionStatus\"]] = None, submitted_by: Optional[str] = None, submitted_time:",
"'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation',",
"associated with a export. All required parameters must be populated",
":param overriding_alert: overriding alert. :type overriding_alert: str \"\"\" _attribute_map =",
"filter expression to use in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter",
"**kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message = None",
"'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key':",
"self, *, name: str, operator: Union[str, \"OperatorType\"], values: List[str], **kwargs",
"set of tags. Resource tags. :vartype tags: dict[str, str] \"\"\"",
"report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation = { 'grouping': {'max_items':",
"a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression",
"'resource': {'readonly': True}, 'operation': {'readonly': True}, } _attribute_map = {",
"data for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The",
"contact_roles: list of contact roles. :type contact_roles: list[str] :param overriding_alert:",
"an alert. :param definition: defines the type of alert. :type",
"User input name of the view. Required. :type display_name: str",
"\"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map =",
"'type': 'str'}, } def __init__( self, *, definition: Optional[\"AlertPropertiesDefinition\"] =",
"list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get the next set of",
"**kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled = None",
"or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate. :type period_start_date: str",
"reservations. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str",
"self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The details",
"recurrence_period: Has start and end date of the recurrence. The",
"= None, configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] =",
"chart: Optional[Union[str, \"ChartType\"]] = None, accumulated: Optional[Union[str, \"AccumulatedType\"]] = None,",
"execution time. :vartype next_run_time_estimate: ~datetime.datetime \"\"\" _validation = { 'delivery_info':",
"a request. :ivar value: List of cost management operations supported",
"allow access to a storage account, you must register the",
"{'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate':",
"None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property =",
"= None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date",
"the incoming request. The reason is provided in the error",
"True}, 'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True},",
"= configuration self.aggregation = aggregation self.grouping = grouping self.sorting =",
"for the query. All required parameters must be populated in",
"'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__(",
"self, *, e_tag: Optional[str] = None, display_name: Optional[str] = None,",
"Array of column names to be included in the report.",
"'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(ErrorDetails,",
"super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ExportSchedule(msrest.serialization.Model):",
"\"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery",
"= threshold self.operator = operator self.amount = amount self.unit =",
"will be uploaded. :type container: str :param root_folder_path: The name",
"KPIs to show in Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties]",
"'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'},",
"populated by the server, and will be ignored when sending",
"self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the",
"{'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, }",
"frame for pulling data for the forecast. If custom, then",
"True}, 'type': {'readonly': True}, 'tags': {'readonly': True}, 'description': {'readonly': True},",
"None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to =",
"Has time period for pulling data for the export. :type",
"resources to filter by. :type resource_filter: list[object] :param meter_filter: array",
"Azure portal, it is done automatically, however API users need",
"timeframe: Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"ForecastDataset\"] =",
"= None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns",
"None self.modified_on = None self.chart = chart self.accumulated = accumulated",
"to send to Azure. :param destination: Required. Has destination for",
"= { 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or',",
"'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key':",
"**kwargs ): super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope =",
"'str'}, 'category': {'key': 'category', 'type': 'str'}, 'criteria': {'key': 'criteria', 'type':",
"= None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs)",
"'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key':",
"self.aggregation = aggregation self.grouping = grouping self.sorting = sorting self.filter",
"next_link: Optional[str] = None, columns: Optional[List[\"QueryColumn\"]] = None, rows: Optional[List[List[object]]]",
"~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition for the export. :type",
"{'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis':",
"set of tags. Resource tags. :vartype tags: dict[str, str] :param",
":vartype operation: str \"\"\" _validation = { 'provider': {'readonly': True},",
"container self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information associated",
":param type: Required. Has type of the column to group.",
"exports will be uploaded. :type container: str :param root_folder_path: The",
"present, the end date must be greater than start date.",
"'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type': {'key':",
"self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information associated with",
"True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[View]'},",
"_validation = { 'destination': {'required': True}, } _attribute_map = {",
"self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range for data",
"of tags. Resource tags. :vartype tags: dict[str, str] :param definition:",
":param submitted_time: The time when export was queued to be",
"range for data in the export. This should only be",
"values include: \"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type",
"use for comparison. Possible values include: \"In\", \"Contains\". :type operator:",
"\"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"QueryDataset\"]",
"'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName',",
"be provided. Possible values include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\". :type",
"aggregation expression to be used in the report. All required",
"= operator self.values = values class QueryDataset(msrest.serialization.Model): \"\"\"The definition of",
"ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of a report config. All required parameters",
"name: Required. The name of the column to sort. :type",
"name class QueryResult(Resource): \"\"\"Result of query. It contains all columns",
"'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, } def",
"from_property: ~datetime.datetime :param to: Required. The end date for export",
"when sending a request. All required parameters must be populated",
"Query. :vartype resource: str :ivar operation: Operation type: Read, write,",
"Dimension category. :vartype category: str :ivar usage_start: Usage start. :vartype",
"'type': '[ExportExecution]'}, } def __init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs)",
"{ 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type':",
"The name of the aggregation function to use. Possible values",
"'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def __init__(",
"{'required': True}, 'timeframe': {'required': True}, } _attribute_map = { 'type':",
"{'key': 'filter', 'type': 'ReportConfigFilter'}, } def __init__( self, *, granularity:",
":type from_property: ~datetime.datetime :param to: Required. The end date to",
":vartype resource: str :ivar operation: Operation type: Read, write, delete,",
"True}, 'timeframe': {'required': True}, } _attribute_map = { 'type': {'key':",
"None, definition: Optional[\"ExportDefinition\"] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule:",
"super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period =",
"{'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time':",
"True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'},",
"of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation = {",
"{'key': 'properties.description', 'type': 'str'}, 'source': {'key': 'properties.source', 'type': 'str'}, 'details':",
"most recent execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult",
"sending a request. :ivar value: The list of dimensions. :vartype",
"a boolean determining if actualCost will be included. :type include_actual_cost:",
"Optional[Union[str, \"AccumulatedType\"]] = None, metric: Optional[Union[str, \"MetricType\"]] = None, kpis:",
"*, type: Optional[Union[str, \"AlertType\"]] = None, category: Optional[Union[str, \"AlertCategory\"]] =",
"configuration. Allows columns to be selected for the export. If",
"self).__init__(**kwargs) self.name = None self.display = display class OperationDisplay(msrest.serialization.Model): \"\"\"The",
"self.contact_groups = contact_groups self.contact_roles = contact_roles self.overriding_alert = overriding_alert class",
"provides an estimate of the next execution time. :vartype next_run_time_estimate:",
"'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink',",
":param definition: Required. Has the definition for the export. :type",
"item in the dictionary is the alias for the aggregated",
"when sending a request. :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype",
"type: Required. The type of the report. Usage represents actual",
"'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type': 'ExportDataset'},",
"export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has",
"tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation = { 'and_property': {'min_items': 2}, 'or_property':",
"~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the data in",
"CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties of the export. Variables are only",
"'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def",
"alert was closed. :type close_time: str :param modification_time: dateTime in",
"available columns. :param columns: Array of column names to be",
"The time frame for pulling data for the forecast. If",
"Optional[Dict[str, \"QueryAggregation\"]] = None, filter: Optional[\"QueryFilter\"] = None, **kwargs ):",
"self.processing_end_time = processing_end_time self.file_name = file_name self.run_settings = run_settings self.error",
"Variables are only populated by the server, and will be",
"be executed. :type submitted_time: ~datetime.datetime :param processing_start_time: The time when",
"resource_filter self.meter_filter = meter_filter self.tag_filter = tag_filter self.threshold = threshold",
"~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use when displaying costs. Possible",
"use. Possible values include: \"Sum\". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType",
"type: Union[str, \"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None,",
"'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type':",
"sending a request. :ivar code: Error code. :vartype code: str",
"{ 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type':",
"results. :type next_link: str :param columns: Array of columns. :type",
"to Azure. :param from_property: Required. The start date to pull",
"None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property =",
"send to Azure. :param format: The format of the export",
"self, *, and_property: Optional[List[\"QueryFilter\"]] = None, or_property: Optional[List[\"QueryFilter\"]] = None,",
"represents the operation. Variables are only populated by the server,",
"): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation",
"'[Dimension]'}, } def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value",
"For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters must",
":type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map = { 'granularity': {'key': 'granularity',",
"not. :type e_tag: str \"\"\" _validation = { 'id': {'readonly':",
"the KPI in the UI?. :type enabled: bool \"\"\" _attribute_map",
"values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ExportType",
"\"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map",
"in comparison. :type name: str :param operator: Required. The operator",
"**kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name = name",
"data for the export. If custom, then a specific time",
"Optional[Union[str, \"KpiType\"]] = None, id: Optional[str] = None, enabled: Optional[bool]",
"aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of",
"Required. Has destination for the export being delivered. :type destination:",
"'ReportConfigDataset'}, } def __init__( self, *, e_tag: Optional[str] = None,",
"exports. It contains a list of available exports in the",
"scope. :type scope: str :ivar created_on: Date the user created",
"= None, configuration: Optional[\"ExportDatasetConfiguration\"] = None, **kwargs ): super(ExportDataset, self).__init__(**kwargs)",
"'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def",
"the report. Possible values include: \"Daily\", \"Monthly\". :type granularity: str",
":param description: Alert description. :type description: str :param source: Source",
"'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, } def __init__(",
"configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map = { 'granularity': {'key':",
"Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link: The link (url)",
"'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'}, } def __init__( self, *,",
"meter_filter self.tag_filter = tag_filter self.threshold = threshold self.operator = operator",
"for export data. :type from_property: ~datetime.datetime :param to: Required. The",
"is throttled. Retry after waiting for the time specified in",
"self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in",
"{'key': 'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, }",
"contact_emails: list of emails to contact. :type contact_emails: list[str] :param",
"The details of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map",
"{'key': 'rootFolderPath', 'type': 'str'}, } def __init__( self, *, resource_id:",
"for the report. All required parameters must be populated in",
"time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for data in the",
"self, *, name: str, function: Union[str, \"FunctionType\"], **kwargs ): super(ReportConfigAggregation,",
"status was last modified. :type status_modification_time: str \"\"\" _validation =",
"the report. Any valid report column name is allowed. If",
"'bool'}, } def __init__( self, *, type: Optional[Union[str, \"KpiType\"]] =",
"to use for comparison. :type values: list[str] \"\"\" _validation =",
"self).__init__(**kwargs) self.type = type self.id = id self.enabled = enabled",
"'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'},",
"= schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and end date for",
"clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group",
"**kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator",
"~azure.mgmt.costmanagement.models.ExecutionType :param status: The last known status of the export",
"{'key': 'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart':",
"self, *, data: Optional[List[str]] = None, **kwargs ): super(Dimension, self).__init__(**kwargs)",
"class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used in the",
"\"\"\"Dimension. Variables are only populated by the server, and will",
"= None self.next_link = None class CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties",
"in order to send to Azure. :param name: Required. The",
"~datetime.datetime :param processing_end_time: The time when the export execution finished.",
"the forecast. The key of each item in the dictionary",
"Possible values include: \"Sum\". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\"",
"{'key': 'overridingAlert', 'type': 'str'}, } def __init__( self, *, time_grain_type:",
"Optional[List[object]] = None, resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]] =",
"= None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe",
"'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type':",
"kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3 sub-views in the",
"= name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by expression to be",
"source self.details = details self.cost_entity_id = cost_entity_id self.status = status",
"= { 'name': {'required': True}, 'operator': {'required': True}, 'values': {'required':",
":type creation_time: str :param close_time: dateTime in which alert was",
"will include all available columns. The available columns can vary",
"Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, **kwargs ):",
"order by expression to be used in the report. All",
"None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe =",
"= { 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__(",
"'value', 'type': '[Dimension]'}, } def __init__( self, **kwargs ): super(DimensionsListResult,",
"self.operator = operator self.values = values class QueryDataset(msrest.serialization.Model): \"\"\"The definition",
":type type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time",
"self.file_name = file_name self.run_settings = run_settings self.error = error class",
"for pulling data for the forecast. If custom, then a",
"'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe',",
"{'key': 'name', 'type': 'str'}, } def __init__( self, *, name:",
"'type': 'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]]",
"and_property self.or_property = or_property self.not_property = not_property self.dimension = dimension",
":param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime in which",
"it is done automatically, however API users need to register",
"'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type':",
"None, not_property: Optional[\"ReportConfigFilter\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag:",
"{'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map",
"run_history self.next_run_time_estimate = None self.schedule = schedule class ExportDataset(msrest.serialization.Model): \"\"\"The",
"'{str}'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id",
"'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type':",
"*, recurrence: Union[str, \"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]] = None, recurrence_period:",
"datetime of periodStartDate. :type period_start_date: str :param triggered_by: notificationId that",
"{'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__( self, *, e_tag:",
"True}, 'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly': True},",
"True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items': 1}, }",
"'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to:",
"run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing the",
"} _attribute_map = { 'direction': {'key': 'direction', 'type': 'str'}, 'name':",
"the query. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type:",
"of the export. To allow access to a storage account,",
"~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'},",
"*, type: Union[str, \"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] =",
"created_on: ~datetime.datetime :ivar modified_on: Date when the user last modified",
"for license information. # Code generated by Microsoft (R) AutoRest",
"there are any. :vartype next_link: str \"\"\" _validation = {",
"Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] = None, processing_end_time: Optional[datetime.datetime] =",
"self.configuration = configuration self.aggregation = aggregation self.grouping = grouping self.filter",
"over time. Possible values include: \"true\", \"false\". :type accumulated: str",
"= name class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used",
"self.tag_filter = tag_filter self.threshold = threshold self.operator = operator self.amount",
"export. If not provided then the export will include all",
"'type': 'object'}, 'threshold': {'key': 'threshold', 'type': 'float'}, 'operator': {'key': 'operator',",
"'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__(",
"None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool] = None, **kwargs",
"logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison",
"Any valid report column name is allowed. If not provided,",
"{ 'type': {'key': 'type', 'type': 'str'}, 'name': {'key': 'name', 'type':",
"= { 'value': {'key': 'value', 'type': '[Dimension]'}, } def __init__(",
"group by expression to be used in the query. All",
"ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression to be used in the report.",
"column names to be included in the export. If not",
"# -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved.",
"__init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class",
"None, configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None,",
"{'key': 'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, }",
"= name class QueryResult(Resource): \"\"\"Result of query. It contains all",
"creating an export in the Azure portal, it is done",
"'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'},",
"the export. :param and_property: The logical \"AND\" expression. Must have",
":param type: Required. The type of the export. Note that",
"None self.category = None self.usage_start = None self.usage_end = None",
"List, Optional, Union from azure.core.exceptions import HttpResponseError import msrest.serialization from",
"not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension:",
"'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'},",
"str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The name of the",
"= cost_entity_id self.status = status self.creation_time = creation_time self.close_time =",
"super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type =",
"'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'}, } def __init__(",
"forecasted data and UsageAndForecast represents both usage and forecasted data.",
"when sending a request. :ivar provider: Service provider: Microsoft.CostManagement. :vartype",
"type_properties_query_type: The type of the report. Usage represents actual usage,",
"super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence = recurrence self.recurrence_period =",
"values include: \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type operator:",
"name: str \"\"\" _validation = { 'name': {'required': True}, }",
"array of meters to filter by. :type meter_filter: list[object] :param",
"{'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on':",
"aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of a",
"= scope self.created_on = None self.modified_on = None self.chart =",
"to class ForecastDataset(msrest.serialization.Model): \"\"\"The definition of data present in the",
"'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'},",
":param metric: Metric to use when displaying costs. Possible values",
"this alert. :type threshold: float :param operator: operator used to",
"include: \"Daily\", \"Monthly\". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration:",
"~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the data in",
"): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link = None class",
"Configuration of 3 sub-views in the Cost Analysis UI. :type",
"be provided. Possible values include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\",",
"account where exports will be delivered. :type resource_id: str :param",
"name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for pulling",
"'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime',",
"class ForecastDataset(msrest.serialization.Model): \"\"\"The definition of data present in the forecast.",
"= amount self.unit = unit self.current_spend = current_spend self.contact_emails =",
"related budget. :type cost_entity_id: str :param status: alert status. Possible",
"modified this view. :vartype modified_on: ~datetime.datetime :param chart: Chart type",
"type: type of alert. Possible values include: \"Budget\", \"Invoice\", \"Credit\",",
"the entity that executed the export. For OnDemand executions it",
"None, name: Optional[str] = None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type",
"= None class CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties of the export.",
"'type': 'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter',",
"Service is temporarily unavailable. Retry after waiting for the time",
"{'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric':",
"None, details: Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id: Optional[str] = None, status:",
"required once per subscription. When creating an export in the",
"the query. :param columns: Array of column names to be",
"= not_property self.dimension = dimension self.tag = tag class ReportConfigGrouping(msrest.serialization.Model):",
"self.recurrence = recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date",
"category: str :ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar",
"= run_history self.next_run_time_estimate = None self.schedule = schedule class ExportDataset(msrest.serialization.Model):",
"OperationListResult(msrest.serialization.Model): \"\"\"Result of listing cost management operations. It contains a",
"to. :type to: ~datetime.datetime \"\"\" _validation = { 'from_property': {'required':",
"None, filter: Optional[\"ReportConfigFilter\"] = None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity",
"{'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold', 'type': 'float'}, 'operator':",
"): super(Alert, self).__init__(**kwargs) self.definition = definition self.description = description self.source",
"\"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"QueryDataset\"] = None, **kwargs",
":type period_start_date: str :param triggered_by: notificationId that triggered this alert.",
"\"QueryAggregation\"]] = None, filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(ForecastDataset,",
"self).__init__(**kwargs) self.execution_type = execution_type self.status = status self.submitted_by = submitted_by",
"not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension:",
"view. :type name: str \"\"\" _attribute_map = { 'type': {'key':",
"'type': '[KpiProperties]'}, 'pivots': {'key': 'properties.pivots', 'type': '[PivotProperties]'}, 'type_properties_query_type': {'key': 'properties.query.type',",
"= granularity self.configuration = configuration self.aggregation = aggregation self.filter =",
"the aggregated column. Query can have up to 2 aggregation",
"timeFrame set to 'Custom'. The maximum date range is 3",
"\"\"\"The definition of data present in the report. :param granularity:",
"self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param time_grain_type: Type",
"The available columns can vary by customer channel (see examples).",
"'timeframe': {'required': True}, } _attribute_map = { 'type': {'key': 'type',",
"Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, filter: Optional[\"QueryFilter\"]",
"None, error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type",
"or_property: Optional[List[\"ReportConfigFilter\"]] = None, not_property: Optional[\"ReportConfigFilter\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"]",
"str :ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled:",
"or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount. :type amount: float",
"__init__( self, *, display: Optional[\"OperationDisplay\"] = None, **kwargs ): super(Operation,",
"present in the report. :param granularity: The granularity of rows",
"{ 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type':",
"all available columns. The available columns can vary by customer",
"the user email. For scheduled executions it is 'System'. :type",
"{'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly':",
"'ExportRecurrencePeriod'}, } def __init__( self, *, recurrence: Union[str, \"RecurrenceType\"], status:",
"None, delivery_info: Optional[\"ExportDeliveryInfo\"] = None, definition: Optional[\"ExportDefinition\"] = None, run_history:",
"\"\"\"The definition of a forecast. All required parameters must be",
"{'key': 'container', 'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'}, }",
"in the query. All required parameters must be populated in",
"\"\"\" _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type':",
"'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type':",
"self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs)",
"= category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param",
"class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset configuration. Allows columns to be",
"run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details of any error. :type",
"executions it is the user email. For scheduled executions it",
"'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'},",
":param e_tag: eTag of the resource. To handle concurrent update",
"None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model):",
"{'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = { 'id':",
"values include: \"Csv\". :type format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info:",
"\"Descending\". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The",
"'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__( self, *,",
"\"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has comparison expression",
"in order to send to Azure. :param direction: Direction of",
"provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get the",
"list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated",
":param accumulated: Show costs accumulated over time. Possible values include:",
"self.type = type self.id = id self.enabled = enabled class",
"= None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] =",
"of query. It contains all columns listed under groupings and",
"the report. The key of each item in the dictionary",
"*, error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error",
"None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type",
"columns listed under groupings and aggregation. Variables are only populated",
"'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, } def __init__( self, *,",
"responses: * 429 TooManyRequests - Request is throttled. Retry after",
"type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] = None,",
"} def __init__( self, *, destination: \"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo,",
"account's subscription with the Microsoft.CostManagementExports resource provider. This is required",
"Optional[\"ForecastDataset\"] = None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool] =",
"type: Optional[Union[str, \"PivotType\"]] = None, name: Optional[str] = None, **kwargs",
"= None, threshold: Optional[float] = None, operator: Optional[Union[str, \"AlertOperator\"]] =",
"def __init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None",
"'name': {'required': True}, 'operator': {'required': True}, 'values': {'required': True, 'min_items':",
"roles. :type contact_roles: list[str] :param overriding_alert: overriding alert. :type overriding_alert:",
"alert. :type threshold: float :param operator: operator used to compare",
"or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values to use",
"or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame for pulling data",
"'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type':",
":param processing_start_time: The time when export was picked up to",
"= { 'value': {'key': 'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink',",
"for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\"",
"'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'},",
"= type self.name = name class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression",
"'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key':",
"in the scope provided. Variables are only populated by the",
"values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\",",
"{'key': 'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate':",
"value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get the next set",
"of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule:",
"set of results. Variables are only populated by the server,",
"for recurrence schedule. All required parameters must be populated in",
"type: Optional[Union[str, \"KpiType\"]] = None, id: Optional[str] = None, enabled:",
"self.time_period = time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result of",
"\"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration",
"scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'",
":vartype type: str :ivar tags: A set of tags. Resource",
"{'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows':",
"of column names to be included in the query. Any",
"AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param time_grain_type: Type of timegrain cadence. Possible",
"filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items':",
"'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type':",
"self.to = to class ForecastDataset(msrest.serialization.Model): \"\"\"The definition of data present",
"To allow access to a storage account, you must register",
"'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key':",
":param name: Required. The name of the column to aggregate.",
"= None, run_history: Optional[\"ExportExecutionListResult\"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs)",
"be specified with timeFrame set to 'Custom'. The maximum date",
"'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key':",
"export execution. Possible values include: \"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\",",
"\"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria: str or",
"def __init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\", format: Optional[Union[str,",
"contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts. Variables",
"type. :vartype type: str :param e_tag: eTag of the resource.",
"\"System\". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that",
"self.id = None self.name = None self.type = None self.e_tag",
"True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags': {'readonly': True},",
"pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset:",
"of results. :vartype next_link: str \"\"\" _validation = { 'value':",
"{'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period':",
"Optional[\"ExportDeliveryInfo\"] = None, definition: Optional[\"ExportDefinition\"] = None, run_history: Optional[\"ExportExecutionListResult\"] =",
"must be in future. If present, the end date must",
"'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description',",
"\"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\". :type criteria:",
"values: list[str] \"\"\" _validation = { 'name': {'required': True}, 'operator':",
"values include: \"Usage\". :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe:",
"it is the user email. For scheduled executions it is",
"filter: Optional[\"ReportConfigFilter\"] = None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity =",
"use in the query. Query can have up to 2",
"schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"= None, status: Optional[Union[str, \"ExecutionStatus\"]] = None, submitted_by: Optional[str] =",
"Dictionary of aggregation expression to use in the forecast. The",
"or not. :type e_tag: str \"\"\" _validation = { 'id':",
"of the forecast. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type",
"= triggered_by self.resource_group_filter = resource_group_filter self.resource_filter = resource_filter self.meter_filter =",
"str :ivar type: Resource type. :vartype type: str :ivar tags:",
"= None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, filter: Optional[\"QueryFilter\"] =",
"'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def __init__( self, *,",
"dictionary is the alias for the aggregated column. forecast can",
"AutoRest Code Generator. # Changes may cause incorrect behavior and",
":param cost_entity_id: related budget. :type cost_entity_id: str :param status: alert",
"report. All required parameters must be populated in order to",
"when sending a request. :ivar value: The list of views.",
"str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate. :type period_start_date:",
"Usage represents actual usage, forecast represents forecasted data and UsageAndForecast",
"a list of available exports in the scope provided. Variables",
"dataset in the report. :param columns: Array of column names",
"specific time period must be provided. Possible values include: \"WeekToDate\",",
"{'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated':",
"used in the query. All required parameters must be populated",
"self.run_history = run_history self.next_run_time_estimate = None self.schedule = schedule class",
"} def __init__( self, *, type: Union[str, \"ExportType\"], timeframe: Union[str,",
"'container': {'required': True}, } _attribute_map = { 'resource_id': {'key': 'resourceId',",
":param columns: Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows:",
"= None self.name = None self.type = None self.tags =",
"{'key': 'type', 'type': 'str'}, 'category': {'key': 'category', 'type': 'str'}, 'criteria':",
"{'required': True}, 'container': {'required': True}, } _attribute_map = { 'resource_id':",
"= columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of a report config.",
"aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter expression to use",
"): super(ReportConfigSorting, self).__init__(**kwargs) self.direction = direction self.name = name class",
"populated in order to send to Azure. :param format: The",
"} def __init__( self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] =",
"'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key':",
"Possible values include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\", \"General\", \"xCloud\", \"BudgetForecast\".",
"self.operator = operator self.amount = amount self.unit = unit self.current_spend",
"self, *, type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"]",
"by expression to be used in the report. All required",
":type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has the most",
"= name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for",
"for the time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503",
"'QueryComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"QueryFilter\"]] = None,",
"Required. The type of the query. Possible values include: \"Usage\",",
"{'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled':",
"period must be provided. Possible values include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\",",
"Optional[\"QueryComparisonExpression\"] = None, tag: Optional[\"QueryComparisonExpression\"] = None, **kwargs ): super(QueryFilter,",
"'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'},",
"execution. Possible values include: \"OnDemand\", \"Scheduled\". :type execution_type: str or",
"error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"expression to be used in the query. All required parameters",
"\"ReportConfigColumnType\"], name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type",
"the export. If custom, then a specific time period must",
"use for comparison. :type values: list[str] \"\"\" _validation = {",
"date of the recurrence. The start date must be in",
"query. :param columns: Array of column names to be included",
"start and end date for pulling data for the report.",
"\"\"\"The configuration of dataset in the report. :param columns: Array",
"aggregated column. forecast can have up to 2 aggregation clauses.",
"to use in comparison. :type name: str :param operator: Required.",
"the export will include all available columns. The available columns",
"of order by expression to use in the report. :type",
"listed under groupings and aggregation. Variables are only populated by",
"values include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration:",
"= None, details: Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id: Optional[str] = None,",
"triggered_by: Optional[str] = None, resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]]",
"sending a request. :ivar value: The list of exports. :vartype",
"response indicates that the service is not able to process",
"class QueryResult(Resource): \"\"\"Result of query. It contains all columns listed",
":type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered",
"= status self.recurrence = recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model):",
"None, dimension: Optional[\"QueryComparisonExpression\"] = None, tag: Optional[\"QueryComparisonExpression\"] = None, **kwargs",
"Dictionary of aggregation expression to use in the report. The",
"being used. :type unit: str :param current_spend: current spend. :type",
"executed. :type submitted_time: ~datetime.datetime :param processing_start_time: The time when export",
"= dataset class QueryFilter(msrest.serialization.Model): \"\"\"The filter expression to be used",
"alert was created. :type creation_time: str :param close_time: dateTime in",
"str, **kwargs ): super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name =",
"str, operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs)",
"~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated over time. Possible values",
"= status self.submitted_by = submitted_by self.submitted_time = submitted_time self.processing_start_time =",
"or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible values include: \"Cost\",",
"values include: \"Daily\", \"Monthly\". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param",
"dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression to",
"'type': 'str'}, } def __init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs)",
":type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition :param description: Alert description. :type description: str",
"comparison expression to be used in the report. All required",
"Changes may cause incorrect behavior and will be lost if",
"'str'}, } def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code",
"budget threshold amount. :type amount: float :param unit: unit of",
"{ 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type':",
"The list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation =",
"**kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns = columns",
"= dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result of listing views. It contains",
"'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type':",
"or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical \"NOT\" expression. :type not_property:",
"'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'}, } def __init__( self,",
"Required. Possible values include: \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\". :type",
"= None self.category = None self.usage_start = None self.usage_end =",
"'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key':",
"{'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name':",
"in the Azure portal, it is done automatically, however API",
"last modified. :type status_modification_time: str \"\"\" _validation = { 'id':",
"= tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression to be used",
"ID of resource related to metric (budget). :type id: str",
"None self.data = data self.total = None self.category = None",
"__init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs) self.value = None class",
"bool \"\"\" _attribute_map = { 'type': {'key': 'type', 'type': 'str'},",
"'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link':",
"{'readonly': True}, } _attribute_map = { 'format': {'key': 'format', 'type':",
"super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property =",
"__init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"QueryDatasetConfiguration\"]",
"'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, } def __init__(",
"import Dict, List, Optional, Union from azure.core.exceptions import HttpResponseError import",
":param status: The last known status of the export execution.",
"data for the dimension. :vartype total: int :ivar category: Dimension",
"granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for",
"which alert was closed. :type close_time: str :param modification_time: dateTime",
"None, type: Optional[str] = None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name",
"the view on. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}'",
"subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account",
"dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a tag.",
"report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data",
"* class Resource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are only",
"date for pulling data for the report. All required parameters",
"self.name = name self.type = type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison",
"field to show in view. :type name: str \"\"\" _attribute_map",
"the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required",
"and a URL link to get the next set of",
"= None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info",
"recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range for data in the",
"Resource tags. :vartype tags: dict[str, str] \"\"\" _validation = {",
"'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, } def __init__(",
"{'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name':",
"type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame",
"self.name = name class QueryResult(Resource): \"\"\"Result of query. It contains",
"{'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, }",
"= { 'name': {'required': True}, } _attribute_map = { 'direction':",
"delivery_info: Has delivery information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo",
"= status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The details of",
"'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key':",
"= from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression",
"'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'},",
"for comparison. :type values: list[str] \"\"\" _validation = { 'name':",
"expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression for",
"currency being used. :type unit: str :param current_spend: current spend.",
"'str'}, } def __init__( self, *, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] =",
"\"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] =",
"definition for the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If",
"None, rows: Optional[List[List[object]]] = None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link",
"super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation =",
"'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'},",
"Has configuration information for the data in the report. The",
"self.timeframe = timeframe self.time_period = time_period self.dataset = dataset self.include_actual_cost",
"unit: unit of currency being used. :type unit: str :param",
"'source': {'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'},",
":vartype name: str :ivar type: Resource type. :vartype type: str",
"to Azure. :param resource_id: Required. The resource id of the",
"= None self.operation = None class OperationListResult(msrest.serialization.Model): \"\"\"Result of listing",
"'name', 'type': 'str'}, 'function': {'key': 'function', 'type': 'str'}, } def",
"grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The",
"Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name =",
"'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key':",
"delete, etc. :vartype operation: str \"\"\" _validation = { 'provider':",
"{'key': 'enabled', 'type': 'bool'}, } def __init__( self, *, type:",
"usage, forecast represents forecasted data and UsageAndForecast represents both usage",
"data self.total = None self.category = None self.usage_start = None",
"_attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key':",
"class OperationDisplay(msrest.serialization.Model): \"\"\"The object that represents the operation. Variables are",
"\"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType",
"= { 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate',",
"time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503 ServiceUnavailable -",
"expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has",
"and will be ignored when sending a request. :ivar name:",
"accumulated self.metric = metric self.kpis = kpis self.pivots = pivots",
"be ignored when sending a request. :ivar value: The list",
"id: str :ivar name: Resource name. :vartype name: str :ivar",
"enabled: bool \"\"\" _attribute_map = { 'type': {'key': 'type', 'type':",
":param definition: defines the type of alert. :type definition: ~azure.mgmt.costmanagement.models.AlertPropertiesDefinition",
"= { 'resource_id': {'required': True}, 'container': {'required': True}, } _attribute_map",
"= { 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator',",
"\"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param",
"{'required': True}, } _attribute_map = { 'status': {'key': 'status', 'type':",
"1}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'},",
"'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"ReportConfigFilter\"]] = None,",
"'modification_time': {'key': 'properties.modificationTime', 'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'},",
"'bool'}, } def __init__( self, *, type: Union[str, \"ForecastType\"], timeframe:",
"of any error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation = {",
"or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information for the export.",
"dataset: Has definition for data in this query. :type dataset:",
"and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope: str :ivar created_on:",
"_validation = { 'name': {'required': True}, 'operator': {'required': True}, 'values':",
"enabled: show the KPI in the UI?. :type enabled: bool",
"amount. :type amount: float :param unit: unit of currency being",
"use when displaying costs. Possible values include: \"ActualCost\", \"AmortizedCost\", \"AHUB\".",
"'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'format': {'key': 'properties.format',",
"ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression to be used in the report.",
"self.data = data self.total = None self.category = None self.usage_start",
"\"\"\" _validation = { 'name': {'required': True}, 'operator': {'required': True},",
"performed: Dimensions, Query. :vartype resource: str :ivar operation: Operation type:",
"{'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition':",
"be ignored when sending a request. :ivar value: A list",
"be ignored when sending a request. All required parameters must",
"Optional[str] = None, chart: Optional[Union[str, \"ChartType\"]] = None, accumulated: Optional[Union[str,",
"self.values = values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of data present",
"scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope: str :ivar",
"Has definition for data in this report config. :type dataset:",
"configuration of dataset in the query. :param columns: Array of",
"self.current_spend = current_spend self.contact_emails = contact_emails self.contact_groups = contact_groups self.contact_roles",
"in order to send to Azure. :param type: Required. Has",
"of column. :type name: str :param type: The type of",
"} def __init__( self, *, e_tag: Optional[str] = None, format:",
"a request. :ivar value: A list of export executions. :vartype",
"columns can vary by customer channel (see examples). :type columns:",
"timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] = None, data_set: Optional[\"ExportDataset\"] =",
"user is updating the latest version or not. :type e_tag:",
":param file_name: The name of the exported file. :type file_name:",
"self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload to",
"self.modified_on = None self.chart = chart self.accumulated = accumulated self.metric",
"= time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information",
"start and end date for pulling data for the query.",
"~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression for a dimension. :type",
"populated in order to send to Azure. :param type: Required.",
"self.format = format self.delivery_info = delivery_info self.definition = definition self.run_history",
"chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated over",
"name is allowed. If not provided, then query includes all",
"'type': 'str'}, } def __init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs)",
"self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression to be",
"format: str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information for",
"Microsoft Corporation. All rights reserved. # Licensed under the MIT",
"configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, filter:",
"'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map = {",
"{'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, }",
":type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs accumulated",
"definition of data present in the query. :param granularity: The",
"__init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message",
"a list of available dimensions. Variables are only populated by",
"If not provided then the export will include all available",
":param chart: Chart type of the main view in Cost",
"'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, } def __init__( self, *,",
"of KPIs to show in Cost Analysis UI. :type kpis:",
"'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type':",
"True}, } _attribute_map = { 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'},",
"dict[str, str] :param definition: defines the type of alert. :type",
"'rows': {'key': 'properties.rows', 'type': '[[object]]'}, } def __init__( self, *,",
"**kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property",
":type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to use",
"recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range for",
"'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key':",
"or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling data",
"'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'display_name': {'key': 'properties.displayName',",
"Dictionary of aggregation expression to use in the query. The",
"\"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType",
"data for the query. All required parameters must be populated",
"information for the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required.",
"Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] = None, data_set:",
"): super(Operation, self).__init__(**kwargs) self.name = None self.display = display class",
"See License.txt in the project root for license information. #",
"str] \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':",
"must register the account's subscription with the Microsoft.CostManagementExports resource provider.",
"definition. Variables are only populated by the server, and will",
"__init__( self, *, data: Optional[List[str]] = None, **kwargs ): super(Dimension,",
"type: Required. The type of the forecast. Possible values include:",
"self.type = type self.name = name class QueryResult(Resource): \"\"\"Result of",
"set to 'Custom'. The maximum date range is 3 months.",
"grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression to use in",
":param from_property: Required. The start date of recurrence. :type from_property:",
"e_tag: Optional[str] = None, display_name: Optional[str] = None, scope: Optional[str]",
"= { 'value': {'key': 'value', 'type': '[View]'}, 'next_link': {'key': 'nextLink',",
"(url) to the next page of results. :vartype next_link: str",
"'ReportConfigComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None,",
"= None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing dimensions. It contains",
"{'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, }",
"Resource Id. :vartype id: str :ivar name: Resource name. :vartype",
"Required. The start date for export data. :type from_property: ~datetime.datetime",
"root_folder_path: Optional[str] = None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id =",
"'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type':",
"None, resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]] = None, tag_filter:",
"'filter', 'type': 'QueryFilter'}, } def __init__( self, *, granularity: Optional[Union[str,",
"\"ChartType\"]] = None, accumulated: Optional[Union[str, \"AccumulatedType\"]] = None, metric: Optional[Union[str,",
"finished. :type processing_end_time: ~datetime.datetime :param file_name: The name of the",
"active schedule, provides an estimate of the next execution time.",
":param next_link: The link (url) to the next page of",
"super(PivotProperties, self).__init__(**kwargs) self.type = type self.name = name class QueryAggregation(msrest.serialization.Model):",
"order to send to Azure. :param type: Required. Has type",
"{'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, }",
"'nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(ViewListResult,",
"have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation]",
"\"false\". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to",
":type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression for a",
"str :ivar message: Error message indicating why the operation failed.",
"# Changes may cause incorrect behavior and will be lost",
"= timeframe self.time_period = time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model):",
":ivar next_link: URL to get the next set of alerts",
"show in view. Possible values include: \"Dimension\", \"TagKey\". :type type:",
"Optional[Union[str, \"AlertCategory\"]] = None, criteria: Optional[Union[str, \"AlertCriteria\"]] = None, **kwargs",
"{'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'}, 'tag':",
"include: \"Usage\". :type type_properties_query_type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The",
"the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for",
"class Resource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are only populated",
"_attribute_map = { 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'}, } def",
"an export. Variables are only populated by the server, and",
"e_tag: Optional[str] = None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id =",
"} _attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'}, 'container':",
"__init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration,",
"in view. :type name: str \"\"\" _attribute_map = { 'type':",
"**kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info",
"on. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup",
"column to group. :type name: str \"\"\" _validation = {",
"\"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items': 0}, }",
"'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type':",
"{'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'next_link':",
"= granularity self.configuration = configuration self.aggregation = aggregation self.grouping =",
"self).__init__(**kwargs) self.name = name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The",
":ivar tags: A set of tags. Resource tags. :vartype tags:",
"None class Dimension(Resource): \"\"\"Dimension. Variables are only populated by the",
"Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns",
"root for license information. # Code generated by Microsoft (R)",
"to use in the forecast. The key of each item",
"= grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of",
"unavailable. Retry after waiting for the time specified in the",
"specified in the \"Retry-After\" header. :param error: The details of",
":param from_property: Required. The start date for export data. :type",
"clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by",
":type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression",
"values include: \"Forecast\", \"Budget\". :type type: str or ~azure.mgmt.costmanagement.models.KpiType :param",
"from azure.core.exceptions import HttpResponseError import msrest.serialization from ._cost_management_client_enums import *",
"'to': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from',",
"} def __init__( self, *, type: Union[str, \"ReportConfigColumnType\"], name: str,",
"resource provider. This is required once per subscription. When creating",
"**kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe",
"that triggered alert. Possible values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\",",
"not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression for a dimension.",
"{ 'name': {'required': True}, 'operator': {'required': True}, 'values': {'required': True,",
"in the \"Retry-After\" header. :param error: The details of the",
"None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity",
"'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start': {'key': 'properties.usageStart', 'type':",
"Optional[\"ReportConfigFilter\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] =",
"request. :ivar code: Error code. :vartype code: str :ivar message:",
"'operator': {'required': True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map",
":param granularity: The granularity of rows in the query. Possible",
"for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition",
"Operation(msrest.serialization.Model): \"\"\"A Cost management REST API operation. Variables are only",
"in the query. Possible values include: \"Daily\". :type granularity: str",
"be ignored when sending a request. :ivar provider: Service provider:",
"= { 'name': {'readonly': True}, } _attribute_map = { 'name':",
":type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in",
":vartype modified_on: ~datetime.datetime :param chart: Chart type of the main",
"\"\"\" _validation = { 'name': {'required': True}, } _attribute_map =",
"ignored when sending a request. :ivar value: List of cost",
"name of the view. Required. :type display_name: str :param scope:",
"'destination', 'type': 'ExportDeliveryDestination'}, } def __init__( self, *, destination: \"ExportDeliveryDestination\",",
"{'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data':",
"Union[str, \"ReportConfigColumnType\"], name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type =",
":param kpis: List of KPIs to show in Cost Analysis",
"'str'}, 'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type':",
"alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get the",
"'str'}, 'display_name': {'key': 'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope', 'type':",
"type: Optional[Union[str, \"AlertType\"]] = None, category: Optional[Union[str, \"AlertCategory\"]] = None,",
"*, and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property:",
"type: Union[str, \"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] = None,",
"} _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'recurrence':",
"for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\"",
"The name of the container where exports will be uploaded.",
"'definition', 'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key':",
"'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__( self, *, delivery_info: \"ExportDeliveryInfo\",",
"\"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe:",
"timeframe self.time_period = time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The",
"**kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration",
"{ 'from_property': {'required': True}, 'to': {'required': True}, } _attribute_map =",
"'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key':",
"= None, enabled: Optional[bool] = None, **kwargs ): super(KpiProperties, self).__init__(**kwargs)",
"True}, } _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'},",
"For OnDemand executions it is the user email. For scheduled",
"date range is 3 months. All required parameters must be",
"criteria class AlertPropertiesDetails(msrest.serialization.Model): \"\"\"Alert details. :param time_grain_type: Type of timegrain",
"**kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration",
"'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key': 'dataSet', 'type':",
"dateTime in which the alert status was last modified. :type",
"The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated :param dimension: Has",
"for the export. If not provided then the export will",
"compare currentSpend with amount. Possible values include: \"None\", \"EqualTo\", \"GreaterThan\",",
"tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation = { 'and_property': {'min_items':",
"list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical \"OR\" expression. Must have at",
"expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]",
"error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map = { 'error': {'key':",
"_validation = { 'name': {'required': True}, 'function': {'required': True}, }",
"The maximum date range is 3 months. All required parameters",
"self.next_link = None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing dimensions. It",
"~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details of any error. :type error:",
"provided in the error message. Some Error responses: * 429",
"done automatically, however API users need to register the subscription.",
":type triggered_by: str :param resource_group_filter: array of resourceGroups to filter",
"{'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'definition', 'type': 'ExportDefinition'}, 'run_history':",
"configuration self.aggregation = aggregation self.grouping = grouping self.sorting = sorting",
"str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was",
"= error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing the execution history",
"version or not. :type e_tag: str :param display_name: User input",
"sending a request. :ivar value: The list of views. :vartype",
":type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'id': {'readonly': True},",
"'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by': {'key': 'triggeredBy',",
"model definition. Variables are only populated by the server, and",
"configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"ReportConfigAggregation\"]] = None, grouping:",
"None, status: Optional[Union[str, \"AlertStatus\"]] = None, creation_time: Optional[str] = None,",
"of the export execution. Possible values include: \"Queued\", \"InProgress\", \"Completed\",",
"the server, and will be ignored when sending a request.",
"__init__( self, *, type: Union[str, \"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"], time_period:",
"Array of rows. :type rows: list[list[object]] \"\"\" _validation = {",
"DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing dimensions. It contains a list of",
"'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key':",
"'grouping_enabled': {'readonly': True}, 'total': {'readonly': True}, 'category': {'readonly': True}, 'usage_start':",
"Azure. :param type: Required. The type of the export. Note",
"when export was picked up to be executed. :type processing_start_time:",
"The start date must be in future. If present, the",
"Required. The end date for export data. :type to: ~datetime.datetime",
"Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect",
"{'key': 'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status':",
"the export has an active schedule, provides an estimate of",
"sort. :type name: str \"\"\" _validation = { 'name': {'required':",
"when sending a request. :ivar code: Error code. :vartype code:",
"): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values",
"'delivery_info': {'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, }",
"Optional[str] = None, columns: Optional[List[\"QueryColumn\"]] = None, rows: Optional[List[List[object]]] =",
"Error code. :vartype code: str :ivar message: Error message indicating",
"_attribute_map = { 'value': {'key': 'value', 'type': '[ExportExecution]'}, } def",
"be used to determine whether the user is updating the",
"message indicating why the operation failed. :vartype message: str \"\"\"",
"e_tag: str \"\"\" _validation = { 'id': {'readonly': True}, 'name':",
"str \"\"\" _attribute_map = { 'definition': {'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'},",
"data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination information for the delivery of",
"the export. Note that 'Usage' is equivalent to 'ActualCost' and",
"'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink',",
"= None, not_property: Optional[\"ReportConfigFilter\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None,",
"self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model):",
"'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name': {'key':",
"dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression for",
"you must register the account's subscription with the Microsoft.CostManagementExports resource",
"\"YearToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has",
"} def __init__( self, *, type: Union[str, \"QueryColumnType\"], name: str,",
"be included. :type include_actual_cost: bool :param include_fresh_partial_cost: a boolean determining",
"description: str :ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar",
"{'key': 'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property:",
"__init__( self, *, name: str, function: Union[str, \"FunctionType\"], **kwargs ):",
"or_property self.not_property = not_property self.dimension = dimension self.tag = tag",
"show in view. :type name: str \"\"\" _attribute_map = {",
"= None, display_name: Optional[str] = None, scope: Optional[str] = None,",
"\"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required.",
"in the report. The configuration will be ignored if aggregation",
"delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The",
"'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'},",
"frame for pulling data for the report. If custom, then",
"Optional[str] = None, resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]] =",
"~datetime.datetime \"\"\" _validation = { 'delivery_info': {'required': True}, 'definition': {'required':",
"'/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type scope: str :ivar created_on: Date",
"True}, } _attribute_map = { 'resource_id': {'key': 'resourceId', 'type': 'str'},",
"start date for export data. :type from_property: ~datetime.datetime :param to:",
"all available columns. :param columns: Array of column names to",
"super(ExportListResult, self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties): \"\"\"The properties of",
"\"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503 ServiceUnavailable - Service is temporarily unavailable.",
"_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'function': {'key':",
"\"AHUB\". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType :param kpis: List of",
"} def __init__( self, *, execution_type: Optional[Union[str, \"ExecutionType\"]] = None,",
"{'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values':",
"'float'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type':",
"'type', 'type': 'str'}, } def __init__( self, *, name: Optional[str]",
"filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param",
"str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible values include:",
"'{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status', 'type':",
"is the alias for the aggregated column. forecast can have",
"self.aggregation = aggregation self.grouping = grouping self.filter = filter class",
"send to Azure. :param from_property: Required. The start date for",
"Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] =",
"self.name = name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order by expression to",
"{ 'code': {'readonly': True}, 'message': {'readonly': True}, } _attribute_map =",
"specific time period must be provided. Possible values include: \"MonthToDate\",",
"'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime',",
"{'key': 'function', 'type': 'str'}, } def __init__( self, *, name:",
"'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def __init__(",
"'str'}, 'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type':",
"operation. Variables are only populated by the server, and will",
"self.pivots = pivots self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe self.time_period",
"= { 'value': {'key': 'value', 'type': '[ExportExecution]'}, } def __init__(",
"Possible values include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type",
"a request. :ivar value: The list of views. :vartype value:",
"least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical",
"broadcast to. :type contact_groups: list[str] :param contact_roles: list of contact",
"contact roles. :type contact_roles: list[str] :param overriding_alert: overriding alert. :type",
"\"GranularityType\"]] = None, configuration: Optional[\"ExportDatasetConfiguration\"] = None, **kwargs ): super(ExportDataset,",
"Query can have up to 2 group by clauses. :type",
"\"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\", \"GeneralThresholdError\".",
"in order to send to Azure. :param format: The format",
"**kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to",
"data in the export. The configuration will be ignored if",
"expression to use in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\"",
"\"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param",
"to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to",
"alert. :type overriding_alert: str \"\"\" _attribute_map = { 'time_grain_type': {'key':",
":param granularity: The granularity of rows in the forecast. Possible",
"columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows. :type rows: list[list[object]]",
"configuration: Has configuration information for the data in the report.",
"self).__init__(**kwargs) self.direction = direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The",
"'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType',",
"user last modified this view. :vartype modified_on: ~datetime.datetime :param chart:",
"delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition for the export.",
"} def __init__( self, *, name: str, function: Union[str, \"FunctionType\"],",
"~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items':",
"= rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and end date for",
"rows: Optional[List[List[object]]] = None, **kwargs ): super(QueryResult, self).__init__(**kwargs) self.next_link =",
"meters to filter by. :type meter_filter: list[object] :param tag_filter: tags",
"'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def",
"name: The name of column. :type name: str :param type:",
"~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame for pulling data for",
"{ 'name': {'required': True}, } _attribute_map = { 'direction': {'key':",
"in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation =",
"self).__init__(**kwargs) self.name = name self.operator = operator self.values = values",
"None, source: Optional[Union[str, \"AlertSource\"]] = None, details: Optional[\"AlertPropertiesDetails\"] = None,",
"request. The reason is provided in the error message. Some",
"{'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time':",
"= recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range",
"'operation': {'key': 'operation', 'type': 'str'}, } def __init__( self, **kwargs",
"must be greater than start date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\"",
"{'key': 'columns', 'type': '[str]'}, } def __init__( self, *, columns:",
"that do not yet provide data for charges or amortization",
"= type self.category = category self.criteria = criteria class AlertPropertiesDetails(msrest.serialization.Model):",
"} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'operator':",
"\"Sum\". :type function: str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation = {",
":type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which",
"that the service is not able to process the incoming",
"the Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The",
"self.schedule = schedule class ExportDataset(msrest.serialization.Model): \"\"\"The definition for data in",
"'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key':",
"the column to group. :type name: str \"\"\" _validation =",
"{'key': 'properties.definition', 'type': 'AlertPropertiesDefinition'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'source':",
"~datetime.datetime :ivar modified_on: Date when the user last modified this",
"field will be used to determine whether the user is",
"to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping:",
"grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression to",
"= accumulated self.metric = metric self.kpis = kpis self.pivots =",
"type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None, timeframe: Optional[Union[str, \"ReportTimeframeType\"]] = None,",
":type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a",
"Resource type. :vartype type: str :ivar tags: A set of",
"**kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence = recurrence",
"'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold': {'key':",
"} def __init__( self, *, delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\", format:",
"be included in the export. If not provided then the",
"to show in view. Possible values include: \"Dimension\", \"TagKey\". :type",
"**kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value = None self.next_link = None",
"export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'delivery_info': {'required':",
"sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilter\"] = None, **kwargs ):",
"Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\"",
"the next page of results. :type next_link: str :param columns:",
"'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, }",
"= root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information associated with a",
"self.code = None self.message = None class ErrorResponse(msrest.serialization.Model): \"\"\"Error response",
"class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression to be used in the",
"Total number of data for the dimension. :vartype total: int",
"'resource_id': {'required': True}, 'container': {'required': True}, } _attribute_map = {",
"self, *, resource_id: str, container: str, root_folder_path: Optional[str] = None,",
"'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type':",
"'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self, **kwargs",
"have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property:",
"list[list[object]] \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':",
"'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'},",
"of sort. Possible values include: \"Ascending\", \"Descending\". :type direction: str",
"period for pulling data for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod",
"pivots: Configuration of 3 sub-views in the Cost Analysis UI.",
"True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Export]'},",
"the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition",
"'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__( self, *, granularity: Optional[Union[str,",
"None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension:",
"'type': 'ExportDefinition'}, 'run_history': {'key': 'runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate',",
"'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history': {'key':",
"'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__(",
"Azure. :param direction: Direction of sort. Possible values include: \"Ascending\",",
"'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__( self,",
"'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'},",
"'tags', 'type': '{str}'}, } def __init__( self, **kwargs ): super(Resource,",
"= kpis self.pivots = pivots self.type_properties_query_type = type_properties_query_type self.timeframe =",
"'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key':",
"Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] =",
"{'required': True}, 'definition': {'required': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map",
"that triggered this alert. :type triggered_by: str :param resource_group_filter: array",
"def __init__( self, *, destination: \"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs)",
"range is 3 months. All required parameters must be populated",
"'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__( self, *,",
"grain. :type name: str \"\"\" _validation = { 'type': {'required':",
"self.definition = definition self.run_history = run_history self.next_run_time_estimate = None self.schedule",
"in order to send to Azure. :param type: Required. The",
"~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered alert. Possible values include:",
"'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type':",
":param time_grain_type: Type of timegrain cadence. Possible values include: \"None\",",
"alert status was last modified. :type status_modification_time: str \"\"\" _attribute_map",
"Service provider: Microsoft.CostManagement. :vartype provider: str :ivar resource: Resource on",
"Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type",
"'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def",
"columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and end",
"= None, chart: Optional[Union[str, \"ChartType\"]] = None, accumulated: Optional[Union[str, \"AccumulatedType\"]]",
"True}, 'values': {'required': True, 'min_items': 1}, } _attribute_map = {",
":param include_actual_cost: a boolean determining if actualCost will be included.",
"List of KPIs to show in Cost Analysis UI. :type",
"boolean determining if actualCost will be included. :type include_actual_cost: bool",
"Possible values include: \"ActualCost\", \"AmortizedCost\", \"AHUB\". :type metric: str or",
"~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required. The time frame for pulling data",
"that executed the export. For OnDemand executions it is the",
"status: The status of the export's schedule. If 'Inactive', the",
"= run_history self.next_run_time_estimate = None class Dimension(Resource): \"\"\"Dimension. Variables are",
"for pulling data for the export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param",
"delivered. Currently only 'Csv' is supported. Possible values include: \"Csv\".",
"will include all available columns. :param columns: Array of column",
"'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key':",
"'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, } def",
"'columns', 'type': '[str]'}, } def __init__( self, *, columns: Optional[List[str]]",
"the main view in Cost Analysis. Required. Possible values include:",
"~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression :param tag: Has comparison expression for a tag. :type",
"dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression to",
"'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'},",
"Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype",
"contact_emails: list[str] :param contact_groups: list of action groups to broadcast",
":type include_fresh_partial_cost: bool \"\"\" _validation = { 'type': {'required': True},",
"'overridingAlert', 'type': 'str'}, } def __init__( self, *, time_grain_type: Optional[Union[str,",
"} _attribute_map = { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to':",
"None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs",
"export. To allow access to a storage account, you must",
"None class CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties of the export. Variables",
"'properties.schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, e_tag: Optional[str]",
"{'readonly': True}, 'type': {'readonly': True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly':",
"'str'}, } def __init__( self, *, name: Optional[str] = None,",
"'type': 'ExportSchedule'}, } def __init__( self, *, e_tag: Optional[str] =",
"by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression",
"The schedule recurrence. Possible values include: \"Daily\", \"Weekly\", \"Monthly\", \"Annually\".",
"self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe self.time_period = time_period self.dataset",
"\"OR\" expression. Must have at least 2 items. :type or_property:",
"If present, the end date must be greater than start",
"Optional[float] = None, contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]] =",
"data to. :type to: ~datetime.datetime \"\"\" _validation = { 'from_property':",
"'type': 'QueryComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"QueryFilter\"]] =",
"time when export was picked up to be executed. :type",
"self.enabled = enabled class Operation(msrest.serialization.Model): \"\"\"A Cost management REST API",
"tags. :vartype tags: dict[str, str] :param definition: defines the type",
"list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation = { 'value': {'readonly': True}, } _attribute_map",
"} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display':",
"Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name =",
"Optional[List[str]] = None, contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]] =",
"granularity: The granularity of rows in the report. Possible values",
"the report. Usage represents actual usage, forecast represents forecasted data",
"2 items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical \"OR\"",
"= name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression",
"self.name = name self.function = function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison",
"'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'},",
"'ExportSchedule'}, } def __init__( self, *, e_tag: Optional[str] = None,",
"'type': 'str'}, 'container': {'key': 'container', 'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath',",
"details of any error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation =",
"'not_property': {'key': 'not', 'type': 'QueryFilter'}, 'dimension': {'key': 'dimension', 'type': 'QueryComparisonExpression'},",
"{'key': 'dataSet', 'type': 'ExportDataset'}, } def __init__( self, *, type:",
"None, dataset: Optional[\"ForecastDataset\"] = None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost:",
"'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description',",
"Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated,",
"{'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag':",
"None, status: Optional[Union[str, \"ExecutionStatus\"]] = None, submitted_by: Optional[str] = None,",
"exports that do not yet provide data for charges or",
"determining if FreshPartialCost will be included. :type include_fresh_partial_cost: bool \"\"\"",
"\"\"\" _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetails'}, }",
"= time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result of listing",
":param submitted_by: The identifier for the entity that executed the",
"= None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(ExportProperties, self).__init__(format=format,",
"'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type':",
"str :ivar name: Resource name. :vartype name: str :ivar type:",
"function: Union[str, \"FunctionType\"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name = name",
"in the report. :param and_property: The logical \"AND\" expression. Must",
"value: List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL",
"Optional[Union[str, \"AlertCriteria\"]] = None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type =",
"} def __init__( self, *, type: Optional[Union[str, \"AlertType\"]] = None,",
"'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type':",
"The granularity of rows in the report. Possible values include:",
"None, **kwargs ): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled =",
"aggregation self.grouping = grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The",
"'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled':",
"{'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True}, 'next_link': {'readonly':",
"include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool] = None, **kwargs ):",
"the dictionary is the alias for the aggregated column. forecast",
"are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation",
"data. Actual usage and forecasted data can be differentiated based",
"} def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value =",
"class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used in the",
"= columns class QueryDefinition(msrest.serialization.Model): \"\"\"The definition of a query. All",
"used in the report. :param and_property: The logical \"AND\" expression.",
"that 'Usage' is equivalent to 'ActualCost' and is applicable to",
"'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter',",
"When creating an export in the Azure portal, it is",
"run_history: Optional[\"ExportExecutionListResult\"] = None, **kwargs ): super(CommonExportProperties, self).__init__(**kwargs) self.format =",
":vartype tags: dict[str, str] :param execution_type: The type of the",
"grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilter\"]",
"~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':",
"was queued to be executed. :type submitted_time: ~datetime.datetime :param processing_start_time:",
"of the column to sort. :type name: str \"\"\" _validation",
"__init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]] = None, configuration: Optional[\"ReportConfigDatasetConfiguration\"]",
"Optional[List[str]] = None, contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str] =",
"*, e_tag: Optional[str] = None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id",
"function class ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used in",
"filter expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated",
"str :param source: Source of alert. Possible values include: \"Preset\",",
"self.filter = filter class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of data present",
"operation list results if there are any. :vartype next_link: str",
":type to: ~datetime.datetime \"\"\" _validation = { 'from_property': {'required': True},",
"of column. :type type: str \"\"\" _attribute_map = { 'name':",
"for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}'",
"configuration of dataset in the report. :param columns: Array of",
"super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values =",
"id: ID of resource related to metric (budget). :type id:",
"'[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, } def __init__( self,",
"\"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type status: str",
"= None, **kwargs ): super(PivotProperties, self).__init__(**kwargs) self.type = type self.name",
"list of available views. Variables are only populated by the",
"= None self.display = display class OperationDisplay(msrest.serialization.Model): \"\"\"The object that",
"this view. :vartype modified_on: ~datetime.datetime :param chart: Chart type of",
"\"\"\"The group by expression to be used in the report.",
"name self.type = type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to",
"str :param modification_time: dateTime in which alert was last modified.",
"= None, contact_roles: Optional[List[str]] = None, overriding_alert: Optional[str] = None,",
"~datetime.datetime :param to: Required. The end date to pull data",
"to send to Azure. :param resource_id: Required. The resource id",
"**kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource = None",
"'to', 'type': 'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime,",
"definition of an export. All required parameters must be populated",
"resource: Resource on which the operation is performed: Dimensions, Query.",
"None class OperationListResult(msrest.serialization.Model): \"\"\"Result of listing cost management operations. It",
"used to compare currentSpend with amount. Possible values include: \"None\",",
"class ExportDataset(msrest.serialization.Model): \"\"\"The definition for data in the export. :param",
"{'required': True}, } _attribute_map = { 'direction': {'key': 'direction', 'type':",
"def __init__( self, *, and_property: Optional[List[\"ReportConfigFilter\"]] = None, or_property: Optional[List[\"ReportConfigFilter\"]]",
"= meter_filter self.tag_filter = tag_filter self.threshold = threshold self.operator =",
"name class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used in",
"of results. :type next_link: str :param columns: Array of columns.",
"the execution history of an export. Variables are only populated",
"= delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate =",
"'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId',",
"super(QueryGrouping, self).__init__(**kwargs) self.type = type self.name = name class QueryResult(Resource):",
"columns: Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns =",
"in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation = {",
"{'key': 'id', 'type': 'str'}, 'enabled': {'key': 'enabled', 'type': 'bool'}, }",
"Union[str, \"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset:",
"the export being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation =",
"is updating the latest version or not. :type e_tag: str",
"{ 'type': {'key': 'type', 'type': 'str'}, 'category': {'key': 'category', 'type':",
"storage account where exports will be delivered. :type resource_id: str",
"'direction', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def",
"503 ServiceUnavailable - Service is temporarily unavailable. Retry after waiting",
"'[View]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self,",
"\"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] = None, **kwargs ): super(CommonExportProperties,",
"-------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. #",
"True}, } _attribute_map = { 'direction': {'key': 'direction', 'type': 'str'},",
"\"true\", \"false\". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric",
"column to group. Possible values include: \"Tag\", \"Dimension\". :type type:",
"incorrect behavior and will be lost if the code is",
"action groups to broadcast to. :type contact_groups: list[str] :param contact_roles:",
"Possible values include: \"true\", \"false\". :type accumulated: str or ~azure.mgmt.costmanagement.models.AccumulatedType",
"= values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of data present in",
"triggered_by: str :param resource_group_filter: array of resourceGroups to filter by.",
"~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in this report",
"export. :param and_property: The logical \"AND\" expression. Must have at",
"equivalent to 'ActualCost' and is applicable to exports that do",
"*, next_link: Optional[str] = None, columns: Optional[List[\"QueryColumn\"]] = None, rows:",
":ivar id: Resource Id. :vartype id: str :ivar name: Resource",
"'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type':",
"values include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\". :type timeframe: str or",
"*, type: Union[str, \"ReportConfigColumnType\"], name: str, **kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs)",
"request. :ivar value: List of cost management operations supported by",
"self.and_property = and_property self.or_property = or_property self.not_property = not_property self.dimension",
"} _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property':",
"query. Any valid query column name is allowed. If not",
"self.type = type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be",
"'type': 'str'}, 'recurrence_period': {'key': 'recurrencePeriod', 'type': 'ExportRecurrencePeriod'}, } def __init__(",
"grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of",
"'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key':",
"self.destination = destination class ExportExecution(Resource): \"\"\"An export execution. Variables are",
":type type: str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible",
"~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the export has an active schedule,",
"class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of a report config. All required",
"input name of the view. Required. :type display_name: str :param",
"'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, }",
"filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the query.",
"delivery of the export. To allow access to a storage",
":type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information",
"be populated in order to send to Azure. :param type:",
"\"\"\"The object that represents the operation. Variables are only populated",
"or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created.",
"of the export being delivered. Currently only 'Csv' is supported.",
"next_link: URL to get the next set of alerts results",
"None class ExportListResult(msrest.serialization.Model): \"\"\"Result of listing exports. It contains a",
"forecast. :param granularity: The granularity of rows in the forecast.",
"type of the forecast. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\".",
"{'key': 'dataset', 'type': 'QueryDataset'}, } def __init__( self, *, type:",
"error: The details of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\"",
"\"ExportDeliveryInfo\", definition: \"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"]",
"of currency being used. :type unit: str :param current_spend: current",
"frame for pulling data for the export. If custom, then",
"= or_property self.not_property = not_property self.dimension = dimension self.tag =",
"'function', 'type': 'str'}, } def __init__( self, *, name: str,",
"'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self,",
"configuration self.aggregation = aggregation self.grouping = grouping self.filter = filter",
"~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling data for",
"values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator",
"str :param root_folder_path: The name of the directory where exports",
"'error': {'key': 'error', 'type': 'ErrorDetails'}, } def __init__( self, *,",
"= grouping self.sorting = sorting self.filter = filter class ReportConfigDatasetConfiguration(msrest.serialization.Model):",
"__init__( self, *, next_link: Optional[str] = None, columns: Optional[List[\"QueryColumn\"]] =",
"by expression to use in the query. Query can have",
"type: str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert category. Possible values",
"{'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and', 'type':",
"'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter',",
"the forecast. Possible values include: \"Daily\". :type granularity: str or",
"name: str, function: Union[str, \"FunctionType\"], **kwargs ): super(ReportConfigAggregation, self).__init__(**kwargs) self.name",
"get the next set of alerts results if there are",
"ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and end date for recurrence schedule. All",
"'dataset', 'type': 'QueryDataset'}, } def __init__( self, *, type: Union[str,",
"= { 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or',",
"self.amount = amount self.unit = unit self.current_spend = current_spend self.contact_emails",
"'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'}, } def __init__(",
"{'required': True}, 'to': {'required': True}, } _attribute_map = { 'from_property':",
"Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns",
"name: {provider}/{resource}/{operation}. :vartype name: str :param display: The object that",
"{'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule', 'type': 'ExportSchedule'}, }",
"'rootFolderPath', 'type': 'str'}, } def __init__( self, *, resource_id: str,",
"configurations of Cost Analysis. Variables are only populated by the",
"time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result of listing views.",
"'type': 'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key': 'properties.rows',",
"'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, } def",
"data present in the query. :param granularity: The granularity of",
":type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset",
"operations and a URL link to get the next set",
"type. :vartype type: str :ivar tags: A set of tags.",
"cost_entity_id self.status = status self.creation_time = creation_time self.close_time = close_time",
"definition for data in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param",
"= resource_id self.container = container self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model):",
"\"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\", \"CrossCloudNewDataAvailable\", \"CrossCloudCollectionError\",",
"provided then the export will include all available columns. The",
"None, current_spend: Optional[float] = None, contact_emails: Optional[List[str]] = None, contact_groups:",
"to: The end date of recurrence. :type to: ~datetime.datetime \"\"\"",
"str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe: The time frame for pulling",
"Dimension description. :vartype description: str :ivar filter_enabled: Filter enabled. :vartype",
"The logical \"AND\" expression. Must have at least 2 items.",
"= None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id self.container",
"usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar next_link: The link",
"granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for",
"\"\"\"States and configurations of Cost Analysis. Variables are only populated",
"request. :ivar name: Operation name: {provider}/{resource}/{operation}. :vartype name: str :param",
"the data in the report. The configuration will be ignored",
"*, columns: Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns",
"} _attribute_map = { 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'}, }",
":ivar value: List of cost management operations supported by the",
"for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management",
"next set of results. Variables are only populated by the",
"= to class View(ProxyResource): \"\"\"States and configurations of Cost Analysis.",
":param processing_end_time: The time when the export execution finished. :type",
"~datetime.datetime :param file_name: The name of the exported file. :type",
"and is applicable to exports that do not yet provide",
":ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage",
"'submitted_time': {'key': 'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'},",
"function: str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation = { 'name': {'required':",
"'type': '[str]'}, } def __init__( self, *, name: str, operator:",
"comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag:",
"etc. :vartype operation: str \"\"\" _validation = { 'provider': {'readonly':",
"**kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The",
"\"\"\"The request payload to update an alert. :param definition: defines",
"'type': '{str}'}, 'execution_type': {'key': 'properties.executionType', 'type': 'str'}, 'status': {'key': 'properties.status',",
"True}, } _attribute_map = { 'status': {'key': 'status', 'type': 'str'},",
"= processing_end_time self.file_name = file_name self.run_settings = run_settings self.error =",
"{'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key': 'properties.processingEndTime', 'type': 'iso-8601'}, 'file_name':",
"# Licensed under the MIT License. See License.txt in the",
"populated in order to send to Azure. :param resource_id: Required.",
"__init__( self, *, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None, period_start_date: Optional[str]",
"2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key': 'granularity',",
"data can be differentiated based on dates. Possible values include:",
":param filter: Has filter expression to use in the report.",
"= include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must contain a 'type'",
"cost_entity_id: related budget. :type cost_entity_id: str :param status: alert status.",
"'type': '[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter',",
"= overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts. Variables are only",
":vartype message: str \"\"\" _validation = { 'code': {'readonly': True},",
"**kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id self.container = container",
"'type': 'str'}, 'status_modification_user_name': {'key': 'properties.statusModificationUserName', 'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime',",
"current spend. :type current_spend: float :param contact_emails: list of emails",
"str or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use when displaying",
"The name of the column to group. This version supports",
"super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation =",
":type amount: float :param unit: unit of currency being used.",
"super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule = schedule class",
"str \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':",
"after waiting for the time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header.",
"grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilterAutoGenerated\"]",
"def __init__( self, *, data: Optional[List[str]] = None, **kwargs ):",
"'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'},",
"'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation',",
":vartype next_link: str \"\"\" _validation = { 'value': {'readonly': True},",
"export was picked up to be executed. :type processing_start_time: ~datetime.datetime",
"Optional[Union[str, \"ReportGranularityType\"]] = None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation: Optional[Dict[str,",
"message. Some Error responses: * 429 TooManyRequests - Request is",
"Has filter expression to use in the query. :type filter:",
"self.from_property = from_property self.to = to class ForecastDataset(msrest.serialization.Model): \"\"\"The definition",
"and 'enabled' key. :param type: KPI type (Forecast, Budget). Possible",
"# -------------------------------------------------------------------------- import datetime from typing import Dict, List, Optional,",
"ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing the execution history of an export.",
"\"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination class ExportExecution(Resource):",
"in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation =",
"Optional[\"AlertPropertiesDefinition\"] = None, description: Optional[str] = None, source: Optional[Union[str, \"AlertSource\"]]",
"in the export. :param granularity: The granularity of rows in",
"~azure.mgmt.costmanagement.models.ReportType :param timeframe: Required. The time frame for pulling data",
"# Code generated by Microsoft (R) AutoRest Code Generator. #",
"or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered alert. Possible values",
"not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression for a dimension.",
"'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'},",
"def __init__( self, *, name: str, function: Union[str, \"FunctionType\"], **kwargs",
"self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model): \"\"\"The Resource model definition.",
"= type_properties_query_type self.timeframe = timeframe self.time_period = time_period self.dataset =",
"include: \"Forecast\", \"Budget\". :type type: str or ~azure.mgmt.costmanagement.models.KpiType :param id:",
"'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def",
"class ViewListResult(msrest.serialization.Model): \"\"\"Result of listing views. It contains a list",
"of listing dimensions. It contains a list of available dimensions.",
":type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical \"NOT\" expression. :type",
":ivar code: Error code. :vartype code: str :ivar message: Error",
"super(QueryColumn, self).__init__(**kwargs) self.name = name self.type = type class QueryComparisonExpression(msrest.serialization.Model):",
":type display_name: str :param scope: Cost Management scope to save",
"_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'id': {'key':",
"'container': {'key': 'container', 'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'},",
"to use when displaying costs. Possible values include: \"ActualCost\", \"AmortizedCost\",",
"operations. It contains a list of operations and a URL",
"the resource. To handle concurrent update scenario, this field will",
"'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated',",
"Optional[\"ExportTimePeriod\"] = None, data_set: Optional[\"ExportDataset\"] = None, **kwargs ): super(ExportDefinition,",
"True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map =",
"'ExportDataset'}, } def __init__( self, *, type: Union[str, \"ExportType\"], timeframe:",
"a boolean determining if FreshPartialCost will be included. :type include_fresh_partial_cost:",
"tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The filter expression to be used in",
"__init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource",
"'float'}, 'operator': {'key': 'operator', 'type': 'str'}, 'amount': {'key': 'amount', 'type':",
"Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property",
"type: Resource type. :vartype type: str :param e_tag: eTag of",
"category: Optional[Union[str, \"AlertCategory\"]] = None, criteria: Optional[Union[str, \"AlertCriteria\"]] = None,",
":type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition for the",
"data for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has",
"*, columns: Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns",
"Actual usage and forecasted data can be differentiated based on",
"'amount', 'type': 'float'}, 'unit': {'key': 'unit', 'type': 'str'}, 'current_spend': {'key':",
"configuration: Has configuration information for the data in the export.",
"'type': 'str'}, 'status_modification_time': {'key': 'properties.statusModificationTime', 'type': 'str'}, } def __init__(",
"self.time_period = time_period self.data_set = data_set class ExportDeliveryDestination(msrest.serialization.Model): \"\"\"The destination",
"'type_properties_query_type': {'key': 'properties.query.type', 'type': 'str'}, 'timeframe': {'key': 'properties.query.timeframe', 'type': 'str'},",
"\"\"\"The schedule associated with the export. All required parameters must",
"the scope provided. Variables are only populated by the server,",
"list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link",
"# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed",
"'str'}, 'configuration': {'key': 'configuration', 'type': 'ExportDatasetConfiguration'}, } def __init__( self,",
"processing_end_time: ~datetime.datetime :param file_name: The name of the exported file.",
"is done automatically, however API users need to register the",
"tags: dict[str, str] :ivar description: Dimension description. :vartype description: str",
"filter: Optional[\"QueryFilter\"] = None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity =",
"The name of the column to aggregate. :type name: str",
":type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type",
"query. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str",
"definition of a forecast. All required parameters must be populated",
":type processing_start_time: ~datetime.datetime :param processing_end_time: The time when the export",
"Criteria that triggered alert. Possible values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\",",
"the alert status was last modified. :type status_modification_time: str \"\"\"",
":ivar value: A list of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution]",
"execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details of any",
"self, *, destination: \"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination =",
"to Azure. :param direction: Direction of sort. Possible values include:",
"Has type of the column to group. Possible values include:",
"self.type = type self.category = category self.criteria = criteria class",
"tag: Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression",
"the export. If not provided then the export will include",
"\"Table\". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show costs",
":type rows: list[list[object]] \"\"\" _validation = { 'id': {'readonly': True},",
"'str'}, } def __init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value",
"str \"\"\" _attribute_map = { 'name': {'key': 'name', 'type': 'str'},",
"\"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name: Required. The",
"'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups',",
"(Forecast, Budget). Possible values include: \"Forecast\", \"Budget\". :type type: str",
"str \"\"\" _attribute_map = { 'type': {'key': 'type', 'type': 'str'},",
":param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related",
"aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation:",
"will be included. :type include_fresh_partial_cost: bool \"\"\" _validation = {",
"self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model): \"\"\"The definition of a",
"'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'},",
"time_period: Has time period for pulling data for the query.",
"self).__init__(**kwargs) self.destination = destination class ExportExecution(Resource): \"\"\"An export execution. Variables",
"operator: Optional[Union[str, \"AlertOperator\"]] = None, amount: Optional[float] = None, unit:",
"'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'},",
"**kwargs ): super(ReportConfigGrouping, self).__init__(**kwargs) self.type = type self.name = name",
"str :param status_modification_time: dateTime in which the alert status was",
"run_history: If requested, has the most recent execution history for",
"populated in order to send to Azure. :param direction: Direction",
"self.type = type self.name = name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The order",
"values include: \"ActualCost\", \"AmortizedCost\", \"AHUB\". :type metric: str or ~azure.mgmt.costmanagement.models.MetricType",
"export. :type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for data",
"**kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator",
"data from. :type from_property: ~datetime.datetime :param to: Required. The end",
"expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression for",
"of an export. All required parameters must be populated in",
"cost_entity_id: Optional[str] = None, status: Optional[Union[str, \"AlertStatus\"]] = None, creation_time:",
"forecast can have up to 2 aggregation clauses. :type aggregation:",
"in the query. :param granularity: The granularity of rows in",
"To handle concurrent update scenario, this field will be used",
"type: Resource type. :vartype type: str :ivar tags: A set",
"in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503 ServiceUnavailable - Service is",
":param triggered_by: notificationId that triggered this alert. :type triggered_by: str",
":type recurrence: str or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and",
"super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values =",
"or ~azure.mgmt.costmanagement.models.ExportType :param timeframe: Required. The time frame for pulling",
"'amount': {'key': 'amount', 'type': 'float'}, 'unit': {'key': 'unit', 'type': 'str'},",
"to use in the forecast. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _attribute_map",
"None, scope: Optional[str] = None, chart: Optional[Union[str, \"ChartType\"]] = None,",
"self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The details of the error.",
"delivery information associated with a export. All required parameters must",
"the forecast. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type:",
":ivar name: Resource name. :vartype name: str :ivar type: Resource",
"than start date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation = {",
"'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, } def __init__(",
"'str'}, 'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type':",
"grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool :param data: :type data:",
"modified. :type status_modification_time: str \"\"\" _validation = { 'id': {'readonly':",
"= None, submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] = None,",
"included. :type include_fresh_partial_cost: bool \"\"\" _validation = { 'type': {'required':",
"'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__(",
"order to send to Azure. :param format: The format of",
"'status': {'key': 'status', 'type': 'str'}, 'recurrence': {'key': 'recurrence', 'type': 'str'},",
"updating the latest version or not. :type e_tag: str \"\"\"",
"list[str] \"\"\" _validation = { 'name': {'required': True}, 'operator': {'required':",
"the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation = { 'grouping':",
"Required. The schedule recurrence. Possible values include: \"Daily\", \"Weekly\", \"Monthly\",",
"triggered this alert. :type triggered_by: str :param resource_group_filter: array of",
"None class ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates that the service is",
"{ 'name': {'key': 'name', 'type': 'str'}, 'operator': {'key': 'operator', 'type':",
"QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name: The name of column. :type name:",
"'[str]'}, } def __init__( self, *, name: str, operator: Union[str,",
"self.tag = tag class QueryGrouping(msrest.serialization.Model): \"\"\"The group by expression to",
"'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'},",
"Optional[datetime.datetime] = None, file_name: Optional[str] = None, run_settings: Optional[\"CommonExportProperties\"] =",
"aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param filter: Has filter",
"= timeframe self.time_period = time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model):",
"ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates that the service is not able",
"Optional[str] = None, current_spend: Optional[float] = None, contact_emails: Optional[List[str]] =",
"The definition for data in the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset",
"'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset',",
"_attribute_map = { 'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key':",
"'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key':",
"True}, 'tags': {'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True},",
"run_settings: Optional[\"CommonExportProperties\"] = None, error: Optional[\"ErrorDetails\"] = None, **kwargs ):",
"'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key':",
"category: Dimension category. :vartype category: str :ivar usage_start: Usage start.",
"Azure. :param resource_id: Required. The resource id of the storage",
"\"GroupedColumn\", \"Table\". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated: Show",
"usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime :ivar",
"items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical \"NOT\" expression.",
":vartype next_run_time_estimate: ~datetime.datetime \"\"\" _validation = { 'delivery_info': {'required': True},",
"by the Microsoft.CostManagement resource provider. :vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link:",
"True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'id': {'key':",
"of available exports in the scope provided. Variables are only",
"= { 'value': {'key': 'value', 'type': '[Export]'}, } def __init__(",
":type description: str :param source: Source of alert. Possible values",
"def __init__( self, *, and_property: Optional[List[\"QueryFilter\"]] = None, or_property: Optional[List[\"QueryFilter\"]]",
"): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status = status self.submitted_by",
"True}, 'next_link': {'readonly': True}, } _attribute_map = { 'id': {'key':",
"'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data',",
"list of export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation =",
"execution. Variables are only populated by the server, and will",
"see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters must be populated in",
"to use. Possible values include: \"Sum\". :type function: str or",
"'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule',",
"'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'},",
"'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'},",
"the next set of alerts results if there are any.",
"listing cost management operations. It contains a list of operations",
"'ActualCost' and is applicable to exports that do not yet",
"provider. This is required once per subscription. When creating an",
"= { 'from_property': {'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to',",
"{'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, }",
"= tag_filter self.threshold = threshold self.operator = operator self.amount =",
"for pulling data for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param",
"function: Union[str, \"FunctionType\"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name = name",
"any error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation = { 'id':",
":type criteria: str or ~azure.mgmt.costmanagement.models.AlertCriteria \"\"\" _attribute_map = { 'type':",
"unit of currency being used. :type unit: str :param current_spend:",
"resource. To handle concurrent update scenario, this field will be",
"Optional[\"ExportDataset\"] = None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type = type",
"'QueryDataset'}, } def __init__( self, *, type: Union[str, \"ExportType\"], timeframe:",
"failed. :vartype message: str \"\"\" _validation = { 'code': {'readonly':",
"): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date = period_start_date self.triggered_by",
"_validation = { 'value': {'readonly': True}, 'next_link': {'readonly': True}, }",
"= period_start_date self.triggered_by = triggered_by self.resource_group_filter = resource_group_filter self.resource_filter =",
"rights reserved. # Licensed under the MIT License. See License.txt",
"{'required': True}, 'name': {'required': True}, } _attribute_map = { 'type':",
"enabled. :vartype grouping_enabled: bool :param data: :type data: list[str] :ivar",
"\"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time",
"for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition",
"*, display: Optional[\"OperationDisplay\"] = None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name",
"'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'},",
"= None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence",
"_validation = { 'resource_id': {'required': True}, 'container': {'required': True}, }",
"is the user email. For scheduled executions it is 'System'.",
"__init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod,",
"{'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'definition':",
"'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def",
"for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition",
"str :param format: The format of the export being delivered.",
"} _attribute_map = { 'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property':",
"\"AlertType\"]] = None, category: Optional[Union[str, \"AlertCategory\"]] = None, criteria: Optional[Union[str,",
"list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation = { 'value': {'readonly': True}, } _attribute_map",
"'operator': {'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'},",
"overriding alert. :type overriding_alert: str \"\"\" _attribute_map = { 'time_grain_type':",
"details: Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id: Optional[str] = None, status: Optional[Union[str,",
"list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation = {",
"self.tags = None class Alert(Resource): \"\"\"An individual alert. Variables are",
"BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group",
"None, **kwargs ): super(Export, self).__init__(e_tag=e_tag, **kwargs) self.format = format self.delivery_info",
"start date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation = { 'recurrence':",
":param and_property: The logical \"AND\" expression. Must have at least",
"= None, contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]] = None,",
"Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"ForecastDataset\"] = None, include_actual_cost: Optional[bool] =",
"super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period =",
"'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type':",
"the UI?. :type enabled: bool \"\"\" _attribute_map = { 'type':",
"definition: \"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]] = None, run_history: Optional[\"ExportExecutionListResult\"] =",
"tags. Resource tags. :vartype tags: dict[str, str] :param next_link: The",
"*, name: Optional[str] = None, type: Optional[str] = None, **kwargs",
"None, or_property: Optional[List[\"QueryFilter\"]] = None, not_property: Optional[\"QueryFilter\"] = None, dimension:",
"update an alert. :param definition: defines the type of alert.",
"tag: Optional[\"QueryComparisonExpression\"] = None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property =",
"then the export will include all available columns. The available",
"self, *, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"QueryDatasetConfiguration\"] =",
"} _attribute_map = { 'value': {'key': 'value', 'type': '[Export]'}, }",
"both usage and forecasted data. Actual usage and forecasted data",
"type class QueryComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used in",
"self).__init__(**kwargs) self.provider = None self.resource = None self.operation = None",
"time period for pulling data for the query. :type time_period:",
"__init__( self, *, name: str, operator: Union[str, \"OperatorType\"], values: List[str],",
"all columns. :type columns: list[str] \"\"\" _attribute_map = { 'columns':",
"self.definition = definition self.run_history = run_history self.next_run_time_estimate = None class",
"The type of the forecast. Possible values include: \"Usage\", \"ActualCost\",",
"use in the report. Report can have up to 2",
"2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical \"NOT\"",
"None, metric: Optional[Union[str, \"MetricType\"]] = None, kpis: Optional[List[\"KpiProperties\"]] = None,",
"None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration =",
"self.configuration = configuration self.aggregation = aggregation self.filter = filter class",
"\"Ascending\", \"Descending\". :type direction: str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required.",
"forecast. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str",
"tag_filter: tags to filter by. :type tag_filter: object :param threshold:",
"self).__init__(**kwargs) self.value = None self.next_link = None class CommonExportProperties(msrest.serialization.Model): \"\"\"The",
"azure.core.exceptions import HttpResponseError import msrest.serialization from ._cost_management_client_enums import * class",
"str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information for the",
"'type': 'ExportDataset'}, } def __init__( self, *, type: Union[str, \"ExportType\"],",
"super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description = description self.source =",
"type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] = None,",
". All required parameters must be populated in order to",
"the export will include all available columns. :param columns: Array",
"'from_property': {'required': True}, } _attribute_map = { 'from_property': {'key': 'from',",
"for the entity that executed the export. For OnDemand executions",
"Optional[str] = None, scope: Optional[str] = None, chart: Optional[Union[str, \"ChartType\"]]",
"the next set of operation list results if there are",
"to register the subscription. For more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services .",
"self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset configuration. Allows",
"super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status = status self.submitted_by =",
"closed. :type close_time: str :param modification_time: dateTime in which alert",
"{'key': 'properties.schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, e_tag:",
"'Daily' is supported. Possible values include: \"Daily\". :type granularity: str",
"{'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, }",
"export data. :type from_property: ~datetime.datetime :param to: Required. The end",
"**kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date = period_start_date",
"self.columns = columns class QueryDefinition(msrest.serialization.Model): \"\"\"The definition of a query.",
"'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'},",
"List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to",
"definition for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset",
"= None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] = None,",
"parameters must be populated in order to send to Azure.",
"= data self.total = None self.category = None self.usage_start =",
"sending a request. :ivar id: Resource Id. :vartype id: str",
":param contact_emails: list of emails to contact. :type contact_emails: list[str]",
"\"None\", \"Active\", \"Overridden\", \"Resolved\", \"Dismissed\". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus",
"If 'Inactive', the export's schedule is paused. Possible values include:",
"\"Usage\", \"Billing\", \"System\". :type category: str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria:",
"{'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type':",
"period for pulling data for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod",
"code: str :ivar message: Error message indicating why the operation",
"\"PivotType\"]] = None, name: Optional[str] = None, **kwargs ): super(PivotProperties,",
"information for the data in the report. The configuration will",
"super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation =",
"delivery_info: Optional[\"ExportDeliveryInfo\"] = None, definition: Optional[\"ExportDefinition\"] = None, run_history: Optional[\"ExportExecutionListResult\"]",
"__init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class",
"be ignored when sending a request. :ivar code: Error code.",
"super(View, self).__init__(e_tag=e_tag, **kwargs) self.display_name = display_name self.scope = scope self.created_on",
"tags: dict[str, str] :param execution_type: The type of the export",
"data_set: The definition for data in the export. :type data_set:",
"None, **kwargs ): super(ForecastDefinition, self).__init__(**kwargs) self.type = type self.timeframe =",
"self.definition = definition self.description = description self.source = source self.details",
"{'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule':",
"'iso-8601'}, 'file_name': {'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type':",
"= None, **kwargs ): super(ForecastDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration",
"include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param",
"'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key':",
"Optional[str] = None, submitted_time: Optional[datetime.datetime] = None, processing_start_time: Optional[datetime.datetime] =",
"by. :type tag_filter: object :param threshold: notification threshold percentage as",
":param data_set: The definition for data in the export. :type",
"'tagFilter', 'type': 'object'}, 'threshold': {'key': 'threshold', 'type': 'float'}, 'operator': {'key':",
"self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts. Variables are",
"\"\"\"The common properties of the export. Variables are only populated",
"with the Microsoft.CostManagementExports resource provider. This is required once per",
"'tags': {'key': 'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'},",
"class QueryDataset(msrest.serialization.Model): \"\"\"The definition of data present in the query.",
"granularity: Optional[Union[str, \"ReportGranularityType\"]] = None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None, aggregation:",
"True}, } _attribute_map = { 'format': {'key': 'format', 'type': 'str'},",
"\"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ForecastType :param timeframe: Required.",
"def __init__( self, *, name: str, operator: Union[str, \"OperatorType\"], values:",
"\"\"\"QueryColumn. :param name: The name of column. :type name: str",
"execution_type: Optional[Union[str, \"ExecutionType\"]] = None, status: Optional[Union[str, \"ExecutionStatus\"]] = None,",
"group. Possible values include: \"Tag\", \"Dimension\". :type type: str or",
"Azure. :param type: Required. The type of the query. Possible",
"by Microsoft (R) AutoRest Code Generator. # Changes may cause",
"= None, name: Optional[str] = None, **kwargs ): super(PivotProperties, self).__init__(**kwargs)",
"'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag',",
"= and_property self.or_property = or_property self.not_property = not_property self.dimension =",
"to Azure. :param destination: Required. Has destination for the export",
"in the error message. Some Error responses: * 429 TooManyRequests",
"Has definition for data in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset",
"be included. :type include_fresh_partial_cost: bool \"\"\" _validation = { 'type':",
"kpis: List of KPIs to show in Cost Analysis UI.",
"str \"\"\" _validation = { 'name': {'required': True}, } _attribute_map",
"{ 'provider': {'readonly': True}, 'resource': {'readonly': True}, 'operation': {'readonly': True},",
"= { 'recurrence': {'required': True}, } _attribute_map = { 'status':",
":param error: The details of the error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails",
"UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of the",
"scope: Cost Management scope to save the view on. This",
"channel (see examples). :type columns: list[str] \"\"\" _attribute_map = {",
"enabled class Operation(msrest.serialization.Model): \"\"\"A Cost management REST API operation. Variables",
"*, type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"QueryTimePeriod\"] =",
"'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset',",
"'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},",
"to send to Azure. :param status: The status of the",
"\"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The",
"time when the export execution finished. :type processing_end_time: ~datetime.datetime :param",
"execution_type: The type of the export execution. Possible values include:",
"name of the container where exports will be uploaded. :type",
"Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type",
"self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model):",
"super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link = None class PivotProperties(msrest.serialization.Model):",
"'function': {'required': True}, } _attribute_map = { 'name': {'key': 'name',",
"'type': 'str'}, 'metric': {'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis',",
"with timeFrame set to 'Custom'. The maximum date range is",
"self.type = type self.name = name class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation",
"__init__( self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs",
"{'key': 'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter':",
"~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining if actualCost will be",
"date must be in future. If present, the end date",
"resource. Variables are only populated by the server, and will",
"represents both usage and forecasted data. Actual usage and forecasted",
"self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model): \"\"\"Result of listing exports.",
"'iso-8601'}, } def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime,",
"dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type =",
"used. :type unit: str :param current_spend: current spend. :type current_spend:",
"type: str :param e_tag: eTag of the resource. To handle",
"data in the export. This should only be specified with",
"to 'Custom'. The maximum date range is 3 months. All",
"__init__( self, *, e_tag: Optional[str] = None, display_name: Optional[str] =",
"= filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of a forecast. All",
":type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation = { 'grouping': {'max_items': 2,",
"self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message =",
"definition self.run_history = run_history self.next_run_time_estimate = None self.schedule = schedule",
"class Export(ProxyResource): \"\"\"An export resource. Variables are only populated by",
"~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier for the entity that executed",
"'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, } _attribute_map = {",
"{'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ForecastDataset'}, 'include_actual_cost':",
":type next_link: str :param columns: Array of columns. :type columns:",
"self, *, type: Union[str, \"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"]",
"): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator = operator self.values",
":param dataset: Has definition for data in this forecast. :type",
"resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]] = None, tag_filter: Optional[object]",
"kpis: Optional[List[\"KpiProperties\"]] = None, pivots: Optional[List[\"PivotProperties\"]] = None, type_properties_query_type: Optional[Union[str,",
"'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'},",
"super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class ForecastDataset(msrest.serialization.Model):",
"columns. The available columns can vary by customer channel (see",
"exported file. :type file_name: str :param run_settings: The export settings",
"= creation_time self.close_time = close_time self.modification_time = modification_time self.status_modification_user_name =",
"the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503 ServiceUnavailable - Service is temporarily",
"filter: Has filter expression to use in the forecast. :type",
"destination information for the delivery of the export. To allow",
"from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property =",
"to be selected for the export. If not provided then",
"export data. :type to: ~datetime.datetime \"\"\" _validation = { 'from_property':",
"super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model): \"\"\"The definition of",
"Possible values include: \"Daily\", \"Monthly\". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType",
"'[ReportConfigGrouping]'}, 'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type':",
"self.contact_emails = contact_emails self.contact_groups = contact_groups self.contact_roles = contact_roles self.overriding_alert",
":type contact_groups: list[str] :param contact_roles: list of contact roles. :type",
"start and end date for recurrence schedule. All required parameters",
"was last modified. :type status_modification_time: str \"\"\" _validation = {",
"{'key': 'properties.query.timeframe', 'type': 'str'}, 'time_period': {'key': 'properties.query.timePeriod', 'type': 'ReportConfigTimePeriod'}, 'dataset':",
"'type': '[Export]'}, } def __init__( self, **kwargs ): super(ExportListResult, self).__init__(**kwargs)",
"export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Has the definition for",
"\"\"\"The filter expression to be used in the report. :param",
"'[QueryColumn]'}, 'rows': {'key': 'properties.rows', 'type': '[[object]]'}, } def __init__( self,",
"scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope,",
"export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map = {",
"= None, resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]] = None,",
"): super(ExportListResult, self).__init__(**kwargs) self.value = None class ExportProperties(CommonExportProperties): \"\"\"The properties",
"values include: \"None\", \"Active\", \"Overridden\", \"Resolved\", \"Dismissed\". :type status: str",
"\"AccumulatedType\"]] = None, metric: Optional[Union[str, \"MetricType\"]] = None, kpis: Optional[List[\"KpiProperties\"]]",
"a storage account, you must register the account's subscription with",
"\"\"\"Result of listing dimensions. It contains a list of available",
"\"ReportConfigAggregation\"]] = None, grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]] =",
"for pulling data for the report. All required parameters must",
"for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing",
"of the container where exports will be uploaded. :type container:",
"'str'}, 'triggered_by': {'key': 'triggeredBy', 'type': 'str'}, 'resource_group_filter': {'key': 'resourceGroupFilter', 'type':",
"of listing cost management operations. It contains a list of",
"'name': {'key': 'name', 'type': 'str'}, } def __init__( self, *,",
"'next_run_time_estimate': {'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'},",
"of results. :vartype next_link: str \"\"\" _validation = { 'id':",
"accumulated over time. Possible values include: \"true\", \"false\". :type accumulated:",
"self, *, from_property: datetime.datetime, to: Optional[datetime.datetime] = None, **kwargs ):",
"Optional[List[\"ReportConfigFilter\"]] = None, or_property: Optional[List[\"ReportConfigFilter\"]] = None, not_property: Optional[\"ReportConfigFilter\"] =",
"of the column to aggregate. :type name: str :param function:",
"id: Optional[str] = None, enabled: Optional[bool] = None, **kwargs ):",
"'type': 'ReportConfigDataset'}, } def __init__( self, *, e_tag: Optional[str] =",
"_validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, }",
"'str'}, 'criteria': {'key': 'criteria', 'type': 'str'}, } def __init__( self,",
"for the export. If custom, then a specific time period",
"have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property:",
"dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation = { 'value': {'readonly':",
"'type': 'QueryDataset'}, } def __init__( self, *, type: Union[str, \"ExportType\"],",
":ivar resource: Resource on which the operation is performed: Dimensions,",
"**kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property",
"of available dimensions. Variables are only populated by the server,",
"Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type",
"= None, **kwargs ): super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe",
"aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, filter: Optional[\"QueryFilter\"] = None, **kwargs",
"{'key': 'meterFilter', 'type': '[object]'}, 'tag_filter': {'key': 'tagFilter', 'type': 'object'}, 'threshold':",
"the export. :param granularity: The granularity of rows in the",
"None, tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property",
"the export. :type definition: ~azure.mgmt.costmanagement.models.ExportDefinition :param run_history: If requested, has",
"the error. Variables are only populated by the server, and",
"{ 'and_property': {'key': 'and', 'type': '[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type':",
":type name: str :param type: The type of column. :type",
"the aggregated column. forecast can have up to 2 aggregation",
"self.dimension = dimension self.tag = tag class QueryGrouping(msrest.serialization.Model): \"\"\"The group",
"expression. Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]",
"is temporarily unavailable. Retry after waiting for the time specified",
"None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id = None self.name =",
"Optional[object] = None, threshold: Optional[float] = None, operator: Optional[Union[str, \"AlertOperator\"]]",
"= granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset",
"alert status. Possible values include: \"None\", \"Active\", \"Overridden\", \"Resolved\", \"Dismissed\".",
"tag_filter: Optional[object] = None, threshold: Optional[float] = None, operator: Optional[Union[str,",
"Azure. :param type: Required. The type of the forecast. Possible",
"'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key':",
"query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation = { 'grouping': {'max_items':",
"self, *, name: str, function: Union[str, \"FunctionType\"], **kwargs ): super(QueryAggregation,",
"'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type': 'iso-8601'}, 'schedule': {'key': 'properties.schedule',",
"last modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str",
"{'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation':",
"is 'System'. :type submitted_by: str :param submitted_time: The time when",
"**kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition, run_history=run_history, **kwargs) self.schedule =",
"definition self.run_history = run_history self.next_run_time_estimate = None class Dimension(Resource): \"\"\"Dimension.",
"'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'},",
"**kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link = None",
"= status self.creation_time = creation_time self.close_time = close_time self.modification_time =",
"*, name: str, operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ):",
"Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] =",
"'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key': 'value',",
"date to pull data from. :type from_property: ~datetime.datetime :param to:",
"{'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = { 'id':",
"datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property",
"self, *, e_tag: Optional[str] = None, **kwargs ): super(ProxyResource, self).__init__(**kwargs)",
":type time_period: ~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for data in",
"'eTag', 'type': 'str'}, 'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key':",
"Array of column names to be included in the export.",
":vartype tags: dict[str, str] :param next_link: The link (url) to",
":type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has definition for data in",
"of rows in the export. Currently only 'Daily' is supported.",
"of a query. All required parameters must be populated in",
"view. :vartype modified_on: ~datetime.datetime :param chart: Chart type of the",
":param type: Data type to show in view. Possible values",
"Required. The time frame for pulling data for the export.",
"Must have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param",
"= direction self.name = name class ReportConfigTimePeriod(msrest.serialization.Model): \"\"\"The start and",
"'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type':",
"self).__init__(**kwargs) self.resource_id = resource_id self.container = container self.root_folder_path = root_folder_path",
"container: str, root_folder_path: Optional[str] = None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs)",
"account, you must register the account's subscription with the Microsoft.CostManagementExports",
"use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation =",
"self.delivery_info = delivery_info self.definition = definition self.run_history = run_history self.next_run_time_estimate",
"{ 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type':",
"None, criteria: Optional[Union[str, \"AlertCriteria\"]] = None, **kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs)",
"self, *, columns: Optional[List[str]] = None, **kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs)",
"} _attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration':",
"self.usage_start = None self.usage_end = None self.next_link = None class",
"Has comparison expression for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\"",
"Required. The end date to pull data to. :type to:",
"included in the query. Any valid query column name is",
"~azure.mgmt.costmanagement.models.AlertStatus :param creation_time: dateTime in which alert was created. :type",
"not_property self.dimension = dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model): \"\"\"The",
"on which the operation is performed: Dimensions, Query. :vartype resource:",
"report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation = { 'id':",
"self.status = status self.recurrence = recurrence self.recurrence_period = recurrence_period class",
":param time_period: Has time period for pulling data for the",
"next page of results. :vartype next_link: str \"\"\" _validation =",
"self.operation = None class OperationListResult(msrest.serialization.Model): \"\"\"Result of listing cost management",
"\"Budget\". :type type: str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of",
"The details of any error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation",
"ServiceUnavailable - Service is temporarily unavailable. Retry after waiting for",
":type tag: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression \"\"\" _validation = { 'and_property': {'min_items': 2},",
"definition=definition, run_history=run_history, **kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start",
"contains a list of available dimensions. Variables are only populated",
"or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource related to metric",
"error class ProxyResource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are only",
"= None, metric: Optional[Union[str, \"MetricType\"]] = None, kpis: Optional[List[\"KpiProperties\"]] =",
"= to class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated with the export.",
"'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time': {'key':",
"represents actual usage, forecast represents forecasted data and UsageAndForecast represents",
"= None self.usage_start = None self.usage_end = None self.next_link =",
"Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under",
"'dataSet', 'type': 'ExportDataset'}, } def __init__( self, *, type: Union[str,",
"list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical \"OR\" expression. Must have at",
"'ExportDefinition'}, 'run_history': {'key': 'properties.runHistory', 'type': 'ExportExecutionListResult'}, 'next_run_time_estimate': {'key': 'properties.nextRunTimeEstimate', 'type':",
"= None self.modified_on = None self.chart = chart self.accumulated =",
":type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details of any error.",
"was closed. :type close_time: str :param modification_time: dateTime in which",
"None self.tags = None class Alert(Resource): \"\"\"An individual alert. Variables",
"'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__( self, *, e_tag: Optional[str]",
"in order to send to Azure. :param destination: Required. Has",
"*, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property",
"modified. :type modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param",
"in which alert was last modified. :type modification_time: str :param",
"of the export execution. Possible values include: \"OnDemand\", \"Scheduled\". :type",
"columns: Optional[List[\"QueryColumn\"]] = None, rows: Optional[List[List[object]]] = None, **kwargs ):",
"for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for",
"_validation = { 'and_property': {'min_items': 2}, 'or_property': {'min_items': 2}, }",
"'name'. :param type: Data type to show in view. Possible",
"provided. Possible values include: \"WeekToDate\", \"MonthToDate\", \"YearToDate\", \"Custom\". :type timeframe:",
"True}, } _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'},",
"'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, } def __init__( self, *,",
"group. :type name: str \"\"\" _validation = { 'type': {'required':",
"expression to use in the forecast. The key of each",
"{'key': 'operator', 'type': 'str'}, 'values': {'key': 'values', 'type': '[str]'}, }",
"'type': '{str}'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs)",
"grouping_enabled: bool :param data: :type data: list[str] :ivar total: Total",
"Required. The start date to pull data from. :type from_property:",
"*, resource_id: str, container: str, root_folder_path: Optional[str] = None, **kwargs",
"The list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation =",
"{'key': 'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots':",
"Optional[\"ReportConfigFilter\"] = None, **kwargs ): super(ReportConfigDataset, self).__init__(**kwargs) self.granularity = granularity",
"~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _validation = { 'id': {'readonly': True}, 'name': {'readonly':",
"} def __init__( self, *, e_tag: Optional[str] = None, **kwargs",
"If not provided, then query includes all columns. :type columns:",
"threshold: float :param operator: operator used to compare currentSpend with",
"self.threshold = threshold self.operator = operator self.amount = amount self.unit",
"= None self.name = None self.type = None self.e_tag =",
"time frame for pulling data for the forecast. If custom,",
"\"AlertSource\"]] = None, details: Optional[\"AlertPropertiesDetails\"] = None, cost_entity_id: Optional[str] =",
"logical \"OR\" expression. Must have at least 2 items. :type",
"'tag', 'type': 'QueryComparisonExpression'}, } def __init__( self, *, and_property: Optional[List[\"QueryFilter\"]]",
"type of the main view in Cost Analysis. Required. Possible",
"'tags', 'type': '{str}'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, 'columns': {'key':",
"dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result of listing views. It contains a",
"to group. This version supports subscription lowest possible grain. :type",
"\"\"\" _validation = { 'name': {'readonly': True}, } _attribute_map =",
"provider: str :ivar resource: Resource on which the operation is",
"{'key': 'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, }",
"Optional[str] = None, format: Optional[Union[str, \"FormatType\"]] = None, delivery_info: Optional[\"ExportDeliveryInfo\"]",
"True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True},",
"'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'},",
"granularity of rows in the forecast. Possible values include: \"Daily\".",
"'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'ExportTimePeriod'}, 'data_set': {'key':",
"definition for data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated",
"_attribute_map = { 'direction': {'key': 'direction', 'type': 'str'}, 'name': {'key':",
"management REST API operation. Variables are only populated by the",
"timeframe self.time_period = time_period self.dataset = dataset class QueryFilter(msrest.serialization.Model): \"\"\"The",
"values include: \"In\", \"Contains\". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param",
"the account's subscription with the Microsoft.CostManagementExports resource provider. This is",
"the operation. Variables are only populated by the server, and",
"'values', 'type': '[str]'}, } def __init__( self, *, name: str,",
"of contact roles. :type contact_roles: list[str] :param overriding_alert: overriding alert.",
"None, or_property: Optional[List[\"ReportConfigFilter\"]] = None, not_property: Optional[\"ReportConfigFilter\"] = None, dimension:",
"as a decimal which activated this alert. :type threshold: float",
"'resource_id': {'key': 'resourceId', 'type': 'str'}, 'container': {'key': 'container', 'type': 'str'},",
"a list of available views. Variables are only populated by",
"'properties.metric', 'type': 'str'}, 'kpis': {'key': 'properties.kpis', 'type': '[KpiProperties]'}, 'pivots': {'key':",
"{ 'value': {'key': 'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type':",
"include: \"OnDemand\", \"Scheduled\". :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param status:",
"= { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly':",
"the operation is performed: Dimensions, Query. :vartype resource: str :ivar",
"Azure. :param from_property: Required. The start date for export data.",
"): super(QueryResult, self).__init__(**kwargs) self.next_link = next_link self.columns = columns self.rows",
"'type': {'readonly': True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = {",
"object :param threshold: notification threshold percentage as a decimal which",
"super(Alert, self).__init__(**kwargs) self.definition = definition self.description = description self.source =",
"filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition of a forecast. All required",
"None, run_history: Optional[\"ExportExecutionListResult\"] = None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs",
"Cost Analysis. Required. Possible values include: \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\",",
"KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must contain a 'type' and 'enabled' key.",
"report column name is allowed. If not provided, then report",
"= None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"] = None,",
"resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]] = None, meter_filter: Optional[List[object]]",
"= None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property",
"\"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has",
"The type of the report. Usage represents actual usage, forecast",
"request. :ivar value: List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar",
"'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}'",
"{'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map",
"'type': 'bool'}, } def __init__( self, *, type: Optional[Union[str, \"KpiType\"]]",
"2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array",
"class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated with the export. All required",
"\"\"\"Result of listing views. It contains a list of available",
"Required. The resource id of the storage account where exports",
"or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical \"NOT\" expression. :type not_property:",
"\"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison expression",
"Licensed under the MIT License. See License.txt in the project",
"'ErrorDetails'}, } def __init__( self, *, execution_type: Optional[Union[str, \"ExecutionType\"]] =",
"'[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key': 'meterFilter', 'type':",
"{'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property':",
"= None, kpis: Optional[List[\"KpiProperties\"]] = None, pivots: Optional[List[\"PivotProperties\"]] = None,",
"for a tag. :type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation = {",
"the recurrence. The start date must be in future. If",
"\"\"\" _validation = { 'type': {'required': True}, 'name': {'required': True},",
"populated in order to send to Azure. :param from_property: Required.",
"'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for",
"data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param dataset: Has",
"triggered_by: notificationId that triggered this alert. :type triggered_by: str :param",
"= None, contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]] = None,",
"def __init__( self, *, type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"],",
"format self.delivery_info = delivery_info self.definition = definition self.run_history = run_history",
"class CommonExportProperties(msrest.serialization.Model): \"\"\"The common properties of the export. Variables are",
"**kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The",
"to show in view. :type name: str \"\"\" _attribute_map =",
"expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated]",
":ivar value: The list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar",
"str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails",
"None class ExportProperties(CommonExportProperties): \"\"\"The properties of the export. Variables are",
"must be populated in order to send to Azure. :param",
"date for export data. :type from_property: ~datetime.datetime :param to: Required.",
"of the resource. To handle concurrent update scenario, this field",
"= None, **kwargs ): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration",
"values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name self.operator",
"'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__(",
"= { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map",
"str, function: Union[str, \"FunctionType\"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs) self.name =",
"'str'}, 'container': {'key': 'container', 'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type':",
"date for recurrence schedule. All required parameters must be populated",
"): super(QueryDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation",
"{'key': 'schedule', 'type': 'ExportSchedule'}, } def __init__( self, *, delivery_info:",
"= None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, grouping: Optional[List[\"QueryGrouping\"]] =",
"use in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation =",
"{'required': True}, } _attribute_map = { 'destination': {'key': 'destination', 'type':",
"a request. :ivar value: The list of dimensions. :vartype value:",
"'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key':",
"time period for pulling data for the export. :type time_period:",
"value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get the next set",
"'resourceGroupFilter', 'type': '[object]'}, 'resource_filter': {'key': 'resourceFilter', 'type': '[object]'}, 'meter_filter': {'key':",
"): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration class",
"be populated in order to send to Azure. :param status:",
"Union[str, \"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None,",
"to use in the report. Report can have up to",
"value: The list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link:",
"def __init__( self, **kwargs ): super(AlertsResult, self).__init__(**kwargs) self.value = None",
"name: str \"\"\" _attribute_map = { 'type': {'key': 'type', 'type':",
"'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type':",
"Some Error responses: * 429 TooManyRequests - Request is throttled.",
"EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope,",
"'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__(",
"Required. The type of the report. Usage represents actual usage,",
":type status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule",
"'properties.displayName', 'type': 'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key':",
"aggregation expression to use in the report. The key of",
"\"Overridden\", \"Resolved\", \"Dismissed\". :type status: str or ~azure.mgmt.costmanagement.models.AlertStatus :param creation_time:",
"= { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource',",
"{'key': 'properties.source', 'type': 'str'}, 'details': {'key': 'properties.details', 'type': 'AlertPropertiesDetails'}, 'cost_entity_id':",
"def __init__( self, *, error: Optional[\"ErrorDetails\"] = None, **kwargs ):",
"submitted_by: str :param submitted_time: The time when export was queued",
"ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of data present in the report. :param",
"be ignored when sending a request. :ivar name: Operation name:",
"list[object] :param resource_filter: array of resources to filter by. :type",
"container: Required. The name of the container where exports will",
"have up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation]",
"Optional[\"ReportConfigFilterAutoGenerated\"] = None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity",
"_attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key':",
"{'key': 'values', 'type': '[str]'}, } def __init__( self, *, name:",
"vary by customer channel (see examples). :type columns: list[str] \"\"\"",
":type from_property: ~datetime.datetime :param to: The end date of recurrence.",
"threshold: Optional[float] = None, operator: Optional[Union[str, \"AlertOperator\"]] = None, amount:",
"} def __init__( self, *, definition: Optional[\"AlertPropertiesDefinition\"] = None, description:",
"definition for data in this query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\"",
"'[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension': {'key': 'dimension', 'type':",
"= modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model):",
"accumulated: Show costs accumulated over time. Possible values include: \"true\",",
"self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model): \"\"\"The definition of an",
":type type: str or ~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource",
"dates. Possible values include: \"Usage\". :type type: str or ~azure.mgmt.costmanagement.models.ReportType",
"exports will be uploaded. :type root_folder_path: str \"\"\" _validation =",
"pulling data for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset:",
"self).__init__(**kwargs) self.name = name self.function = function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn.",
"= type self.timeframe = timeframe self.time_period = time_period self.data_set =",
"'type': 'str'}, } def __init__( self, *, type: Union[str, \"QueryColumnType\"],",
"~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information for the export. :type",
"**kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type = type self.id = id",
"None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration =",
"Optional[\"QueryComparisonExpression\"] = None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property",
":param tag: Has comparison expression for a tag. :type tag:",
"Query can have up to 2 aggregation clauses. :type aggregation:",
"{ 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map =",
"'type': {'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'},",
"sort. Possible values include: \"Ascending\", \"Descending\". :type direction: str or",
"when sending a request. :ivar value: List of cost management",
"): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category = category self.criteria",
"service is not able to process the incoming request. The",
"~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling data for",
"= None, status_modification_time: Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs)",
"= { 'destination': {'key': 'destination', 'type': 'ExportDeliveryDestination'}, } def __init__(",
"include: \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type operator: str",
"'error', 'type': 'ErrorDetails'}, } def __init__( self, *, error: Optional[\"ErrorDetails\"]",
"str :param operator: Required. The operator to use for comparison.",
"'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'},",
"description. :vartype description: str :ivar filter_enabled: Filter enabled. :vartype filter_enabled:",
"{'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type': 'str'}, 'triggered_by':",
"True}, 'function': {'required': True}, } _attribute_map = { 'name': {'key':",
"Resource tags. :vartype tags: dict[str, str] :param next_link: The link",
"send to Azure. :param from_property: Required. The start date to",
"} def __init__( self, *, resource_id: str, container: str, root_folder_path:",
"_validation = { 'from_property': {'required': True}, } _attribute_map = {",
"\"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param dimension: Has comparison expression",
"{ 'value': {'key': 'value', 'type': '[Operation]'}, 'next_link': {'key': 'nextLink', 'type':",
"~datetime.datetime :param chart: Chart type of the main view in",
"Dimensions, Query. :vartype resource: str :ivar operation: Operation type: Read,",
"exports will be delivered. :type resource_id: str :param container: Required.",
"{'key': 'type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'enabled':",
"query. :type dataset: ~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation = { 'type': {'required':",
"of data present in the report. :param granularity: The granularity",
"type self.timeframe = timeframe self.time_period = time_period self.dataset = dataset",
"of listing the execution history of an export. Variables are",
"'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not', 'type': 'QueryFilter'},",
"server, and will be ignored when sending a request. All",
"id self.enabled = enabled class Operation(msrest.serialization.Model): \"\"\"A Cost management REST",
"logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has comparison",
"None, tag_filter: Optional[object] = None, threshold: Optional[float] = None, operator:",
"criteria: Criteria that triggered alert. Possible values include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\",",
"Error message indicating why the operation failed. :vartype message: str",
":type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation = { 'destination': {'required': True},",
"'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key':",
"= { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type',",
":param not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter :param",
"Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of",
"{'key': 'aggregation', 'type': '{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, }",
"= name self.function = function class QueryColumn(msrest.serialization.Model): \"\"\"QueryColumn. :param name:",
"self.display_name = display_name self.scope = scope self.created_on = None self.modified_on",
"Has destination for the export being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination",
"execution. Possible values include: \"Queued\", \"InProgress\", \"Completed\", \"Failed\", \"Timeout\", \"NewDataNotAvailable\",",
"timeframe: The time frame for pulling data for the report.",
"{'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'}, 'modification_time':",
"Optional[str] = None, **kwargs ): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id",
"'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, } def __init__( self, *,",
"modification_time self.status_modification_user_name = status_modification_user_name self.status_modification_time = status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines",
"'type': 'str'}, } def __init__( self, **kwargs ): super(ErrorDetails, self).__init__(**kwargs)",
"\"\"\"The group by expression to be used in the query.",
"of resources to filter by. :type resource_filter: list[object] :param meter_filter:",
"overriding_alert: overriding alert. :type overriding_alert: str \"\"\" _attribute_map = {",
"Has start and end date of the recurrence. The start",
"period for pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod",
":type contact_roles: list[str] :param overriding_alert: overriding alert. :type overriding_alert: str",
"{'key': 'value', 'type': '[Export]'}, } def __init__( self, **kwargs ):",
"ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery information associated with a export. All required",
"schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info, definition=definition,",
"'ReportConfigFilterAutoGenerated'}, } def __init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]] =",
"-------------------------------------------------------------------------- import datetime from typing import Dict, List, Optional, Union",
"The list of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The",
"*, type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] =",
"notificationId that triggered this alert. :type triggered_by: str :param resource_group_filter:",
"'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'},",
"of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation = { 'value':",
"True}, 'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'format': {'key':",
"expression. Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter]",
"be lost if the code is regenerated. # -------------------------------------------------------------------------- import",
"def __init__( self, *, name: Optional[str] = None, type: Optional[str]",
"cadence. Possible values include: \"None\", \"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\",",
"class ExportDefinition(msrest.serialization.Model): \"\"\"The definition of an export. All required parameters",
"the data in the export. The configuration will be ignored",
"The name of the column to sort. :type name: str",
"main view in Cost Analysis. Required. Possible values include: \"Area\",",
"status_modification_time: Optional[str] = None, **kwargs ): super(Alert, self).__init__(**kwargs) self.definition =",
"the export. :type delivery_info: ~azure.mgmt.costmanagement.models.ExportDeliveryInfo :param definition: Required. Has the",
"next_link: str :param columns: Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn]",
"of available views. Variables are only populated by the server,",
"Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDataset\"] = None, **kwargs ): super(View,",
"'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'},",
"'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status', 'type':",
":vartype usage_end: ~datetime.datetime :ivar next_link: The link (url) to the",
"'type': 'ForecastDataset'}, 'include_actual_cost': {'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost',",
"'type': 'bool'}, } def __init__( self, *, type: Union[str, \"ForecastType\"],",
"'unit', 'type': 'str'}, 'current_spend': {'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key':",
"None self.usage_end = None self.next_link = None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result",
"export. This should only be specified with timeFrame set to",
"{'key': 'direction', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, }",
":vartype value: list[~azure.mgmt.costmanagement.models.Alert] :ivar next_link: URL to get the next",
"Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] = None, data_set: Optional[\"ExportDataset\"] = None,",
"Optional[Union[str, \"ExecutionType\"]] = None, status: Optional[Union[str, \"ExecutionStatus\"]] = None, submitted_by:",
"values to use for comparison. :type values: list[str] \"\"\" _validation",
"None, time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDataset\"] = None, **kwargs",
"\"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or",
"{'key': 'nextRunTimeEstimate', 'type': 'iso-8601'}, } def __init__( self, *, delivery_info:",
"\"ReportTimeframeType\"], time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDatasetAutoGenerated\"] = None, **kwargs",
"export. Note that 'Usage' is equivalent to 'ActualCost' and is",
"type (Forecast, Budget). Possible values include: \"Forecast\", \"Budget\". :type type:",
"will be included. :type include_actual_cost: bool :param include_fresh_partial_cost: a boolean",
"alert. Possible values include: \"Budget\", \"Invoice\", \"Credit\", \"Quota\", \"General\", \"xCloud\",",
"self.name = name class QueryAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be",
"request. :ivar value: A list of export executions. :vartype value:",
":vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype usage_end: ~datetime.datetime",
"aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation:",
"'type': 'str'}, } def __init__( self, *, name: str, direction:",
"): super(OperationDisplay, self).__init__(**kwargs) self.provider = None self.resource = None self.operation",
"= None, sorting: Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilterAutoGenerated\"] = None,",
"self.unit = unit self.current_spend = current_spend self.contact_emails = contact_emails self.contact_groups",
"'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self,",
"metric self.kpis = kpis self.pivots = pivots self.type_properties_query_type = type_properties_query_type",
"'type': 'float'}, 'operator': {'key': 'operator', 'type': 'str'}, 'amount': {'key': 'amount',",
"display: ~azure.mgmt.costmanagement.models.OperationDisplay \"\"\" _validation = { 'name': {'readonly': True}, }",
"self.granularity = granularity self.configuration = configuration self.aggregation = aggregation self.grouping",
"automatically, however API users need to register the subscription. For",
"of resourceGroups to filter by. :type resource_group_filter: list[object] :param resource_filter:",
"only be specified with timeFrame set to 'Custom'. The maximum",
"values include: \"Area\", \"Line\", \"StackedColumn\", \"GroupedColumn\", \"Table\". :type chart: str",
"data in this forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a",
"the alias for the aggregated column. Query can have up",
"in Cost Analysis. Required. Possible values include: \"Area\", \"Line\", \"StackedColumn\",",
"Required. The type of the export. Note that 'Usage' is",
"time period for pulling data for the forecast. :type time_period:",
":param schedule: Has schedule information for the export. :type schedule:",
"Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str,",
"~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items': 0},",
"for the dimension. :vartype total: int :ivar category: Dimension category.",
"} def __init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]] = None,",
"write, delete, etc. :vartype operation: str \"\"\" _validation = {",
"'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'tags':",
"'{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type':",
"the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation = { 'grouping':",
"'Inactive', the export's schedule is paused. Possible values include: \"Active\",",
"lowest possible grain. :type name: str \"\"\" _validation = {",
"scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}'",
"{'key': 'type', 'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, 'format':",
"self.container = container self.root_folder_path = root_folder_path class ExportDeliveryInfo(msrest.serialization.Model): \"\"\"The delivery",
"KPI in the UI?. :type enabled: bool \"\"\" _attribute_map =",
"contact_emails self.contact_groups = contact_groups self.contact_roles = contact_roles self.overriding_alert = overriding_alert",
"'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type':",
"next_link: The link (url) to the next page of results.",
"= submitted_by self.submitted_time = submitted_time self.processing_start_time = processing_start_time self.processing_end_time =",
"class ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates that the service is not",
"have at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property:",
"value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation = { 'value': {'readonly': True}, }",
"which activated this alert. :type threshold: float :param operator: operator",
"to be executed. :type processing_start_time: ~datetime.datetime :param processing_end_time: The time",
"tags. :vartype tags: dict[str, str] \"\"\" _validation = { 'id':",
"columns: Optional[List[str]] = None, **kwargs ): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns =",
"'type': 'str'}, } def __init__( self, *, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]]",
"close_time: str :param modification_time: dateTime in which alert was last",
"Azure. :param status: The status of the export's schedule. If",
"str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has time period for pulling",
"{'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type': 'str'}, 'time_period':",
"server, and will be ignored when sending a request. :ivar",
"Required. The name of the column to sort. :type name:",
"= None, accumulated: Optional[Union[str, \"AccumulatedType\"]] = None, metric: Optional[Union[str, \"MetricType\"]]",
"time when export was queued to be executed. :type submitted_time:",
"populated in order to send to Azure. :param status: The",
"was last modified. :type status_modification_time: str \"\"\" _attribute_map = {",
"def __init__( self, *, execution_type: Optional[Union[str, \"ExecutionType\"]] = None, status:",
"**kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to",
"name: Required. The name of the column to use in",
"the column to sort. :type name: str \"\"\" _validation =",
"'iso-8601'}, 'next_link': {'key': 'properties.nextLink', 'type': 'str'}, } def __init__( self,",
"\"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\", \"InvoiceDueDateApproaching\", \"InvoiceDueDateReached\",",
"def __init__( self, *, type: Union[str, \"ReportType\"], timeframe: Union[str, \"ReportTimeframeType\"],",
"'type': 'str'}, } def __init__( self, *, name: Optional[str] =",
"class ExportListResult(msrest.serialization.Model): \"\"\"Result of listing exports. It contains a list",
"\"Daily\", \"Monthly\". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has",
"include: \"CostThresholdExceeded\", \"UsageThresholdExceeded\", \"CreditThresholdApproaching\", \"CreditThresholdReached\", \"QuotaThresholdApproaching\", \"QuotaThresholdReached\", \"MultiCurrency\", \"ForecastCostThresholdExceeded\", \"ForecastUsageThresholdExceeded\",",
"self, *, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"ExportDatasetConfiguration\"] =",
"self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]] = None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] =",
"query. Query can have up to 2 group by clauses.",
"class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the query. :param",
"not_property: Optional[\"ReportConfigFilter\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None, tag: Optional[\"ReportConfigComparisonExpression\"]",
"page of results. :type next_link: str :param columns: Array of",
"= None, delivery_info: Optional[\"ExportDeliveryInfo\"] = None, definition: Optional[\"ExportDefinition\"] = None,",
"'[ReportConfigFilter]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type':",
"{'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property':",
"or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: Has configuration information for the data",
"str :ivar usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end:",
"group by expression to use in the report. Report can",
"type: str \"\"\" _attribute_map = { 'name': {'key': 'name', 'type':",
"list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation = {",
"_validation = { 'from_property': {'required': True}, 'to': {'required': True}, }",
"pulling data for the report. All required parameters must be",
"amount: Optional[float] = None, unit: Optional[str] = None, current_spend: Optional[float]",
"export. The configuration will be ignored if aggregation and grouping",
":type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of the report.",
"status self.recurrence = recurrence self.recurrence_period = recurrence_period class ExportTimePeriod(msrest.serialization.Model): \"\"\"The",
"time_period: Optional[\"ExportTimePeriod\"] = None, data_set: Optional[\"ExportDataset\"] = None, **kwargs ):",
"in the export. The configuration will be ignored if aggregation",
"**kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status = status",
"rows in the export. Currently only 'Daily' is supported. Possible",
"when sending a request. :ivar value: List of alerts. :vartype",
"2}, 'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key':",
"by clauses. :type grouping: list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order",
"user created this view. :vartype created_on: ~datetime.datetime :ivar modified_on: Date",
"valid report column name is allowed. If not provided, then",
"'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type':",
"'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'definition': {'key': 'properties.definition', 'type':",
"values include: \"OnDemand\", \"Scheduled\". :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param",
"= None, **kwargs ): super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe",
"= { 'value': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map",
"'type': '[QueryFilter]'}, 'or_property': {'key': 'or', 'type': '[QueryFilter]'}, 'not_property': {'key': 'not',",
"= None self.chart = chart self.accumulated = accumulated self.metric =",
"Error responses: * 429 TooManyRequests - Request is throttled. Retry",
"{'key': 'includeActualCost', 'type': 'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, }",
"'{QueryAggregation}'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def __init__( self,",
"str :param current_spend: current spend. :type current_spend: float :param contact_emails:",
"display: Optional[\"OperationDisplay\"] = None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name =",
"self.aggregation = aggregation self.filter = filter class ForecastDefinition(msrest.serialization.Model): \"\"\"The definition",
"self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of a",
"'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'definition': {'key':",
"def __init__( self, *, type: Optional[Union[str, \"KpiType\"]] = None, id:",
"expression to use in the query. The key of each",
"up to 2 aggregation clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param",
"to Azure. :param type: Required. The type of the report.",
"tag: Optional[\"ReportConfigComparisonExpression\"] = None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property =",
"name self.operator = operator self.values = values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The",
":type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The logical \"NOT\" expression. :type",
"} def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider =",
"= run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model): \"\"\"Result of listing",
"will be uploaded. :type root_folder_path: str \"\"\" _validation = {",
"to Azure. :param from_property: Required. The start date of recurrence.",
"and end date for pulling data for the report. All",
"list[~azure.mgmt.costmanagement.models.KpiProperties] :param pivots: Configuration of 3 sub-views in the Cost",
"overriding_alert: str \"\"\" _attribute_map = { 'time_grain_type': {'key': 'timeGrainType', 'type':",
"a 'type' and 'name'. :param type: Data type to show",
"the report. :param granularity: The granularity of rows in the",
"class ExportProperties(CommonExportProperties): \"\"\"The properties of the export. Variables are only",
"The end date of recurrence. :type to: ~datetime.datetime \"\"\" _validation",
"super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property =",
":vartype total: int :ivar category: Dimension category. :vartype category: str",
"or not. :type e_tag: str :param display_name: User input name",
"columns. :type columns: list[str] \"\"\" _attribute_map = { 'columns': {'key':",
"value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url) to the next",
"self).__init__(**kwargs) self.type = type self.category = category self.criteria = criteria",
"of views. :vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url)",
"name of the column to group. This version supports subscription",
":param grouping: Array of group by expression to use in",
"aggregate. :type name: str :param function: Required. The name of",
"*, name: str, function: Union[str, \"FunctionType\"], **kwargs ): super(QueryAggregation, self).__init__(**kwargs)",
"estimate of the next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param",
"data for the forecast. If custom, then a specific time",
"self.contact_roles = contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result of",
"self.value = None class ExportListResult(msrest.serialization.Model): \"\"\"Result of listing exports. It",
"the type of alert. :param type: type of alert. Possible",
"\"Usage\", \"ActualCost\", \"AmortizedCost\". :type type: str or ~azure.mgmt.costmanagement.models.ExportType :param timeframe:",
"of the exported file. :type file_name: str :param run_settings: The",
"throttled. Retry after waiting for the time specified in the",
"~datetime.datetime \"\"\" _validation = { 'from_property': {'required': True}, } _attribute_map",
"str :param display_name: User input name of the view. Required.",
"in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter",
"The end date for export data. :type to: ~datetime.datetime \"\"\"",
"list of contact roles. :type contact_roles: list[str] :param overriding_alert: overriding",
"'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key':",
"least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical",
"class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and end date for recurrence schedule.",
"'type': 'str'}, 'e_tag': {'key': 'eTag', 'type': 'str'}, } def __init__(",
"Optional[Union[str, \"ReportConfigSortingDirection\"]] = None, **kwargs ): super(ReportConfigSorting, self).__init__(**kwargs) self.direction =",
"list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation = { 'value': {'readonly': True}, } _attribute_map",
"None self.name = None self.type = None self.tags = None",
"None, tag: Optional[\"QueryComparisonExpression\"] = None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property",
"time frame for pulling data for the report. If custom,",
"'destination': {'required': True}, } _attribute_map = { 'destination': {'key': 'destination',",
"information. # Code generated by Microsoft (R) AutoRest Code Generator.",
"array of resources to filter by. :type resource_filter: list[object] :param",
"in the export. If not provided then the export will",
"granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"QueryDatasetConfiguration\"] = None, aggregation:",
"contact_roles: list[str] :param overriding_alert: overriding alert. :type overriding_alert: str \"\"\"",
"= { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration',",
":param type: Required. The type of the query. Possible values",
"'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'},",
"dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression for a tag.",
"= destination class ExportExecution(Resource): \"\"\"An export execution. Variables are only",
"Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount",
"for pulling data for the forecast. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param",
"\"TagKey\". :type type: str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data field",
"~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Required. Has delivery information for the export.",
"True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key':",
"'type': 'str'}, } def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs)",
"None, cost_entity_id: Optional[str] = None, status: Optional[Union[str, \"AlertStatus\"]] = None,",
":type cost_entity_id: str :param status: alert status. Possible values include:",
"at least 2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The",
":param type: Required. The type of the forecast. Possible values",
"self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition of a report",
"__init__( self, *, type: Union[str, \"ReportConfigColumnType\"], name: str, **kwargs ):",
"amount self.unit = unit self.current_spend = current_spend self.contact_emails = contact_emails",
"Alert category. Possible values include: \"Cost\", \"Usage\", \"Billing\", \"System\". :type",
"listing the execution history of an export. Variables are only",
"filter by. :type resource_filter: list[object] :param meter_filter: array of meters",
"the latest version or not. :type e_tag: str :param format:",
"= None, overriding_alert: Optional[str] = None, **kwargs ): super(AlertPropertiesDetails, self).__init__(**kwargs)",
"or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"]",
"scope to save the view on. This includes 'subscriptions/{subscriptionId}' for",
"= operator self.amount = amount self.unit = unit self.current_spend =",
"'dataset': {'key': 'dataset', 'type': 'QueryDataset'}, } def __init__( self, *,",
"of operation list results if there are any. :vartype next_link:",
"str \"\"\" _validation = { 'value': {'readonly': True}, 'next_link': {'readonly':",
"str or ~azure.mgmt.costmanagement.models.AlertOperator :param amount: budget threshold amount. :type amount:",
"not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension:",
"when the export execution finished. :type processing_end_time: ~datetime.datetime :param file_name:",
"of timegrain cadence. Possible values include: \"None\", \"Monthly\", \"Quarterly\", \"Annually\",",
"execution finished. :type processing_end_time: ~datetime.datetime :param file_name: The name of",
"\"StackedColumn\", \"GroupedColumn\", \"Table\". :type chart: str or ~azure.mgmt.costmanagement.models.ChartType :param accumulated:",
"when sending a request. :ivar value: The list of dimensions.",
"None, pivots: Optional[List[\"PivotProperties\"]] = None, type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None,",
"timeframe self.time_period = time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The",
"_attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key':",
"the next execution time. :vartype next_run_time_estimate: ~datetime.datetime \"\"\" _validation =",
"= None self.next_link = None class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must",
"ReportConfigComparisonExpression(msrest.serialization.Model): \"\"\"The comparison expression to be used in the report.",
"'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key': 'properties.chart', 'type': 'str'},",
"or ~azure.mgmt.costmanagement.models.RecurrenceType :param recurrence_period: Has start and end date of",
"{'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, }",
"data in this report config. :type dataset: ~azure.mgmt.costmanagement.models.ReportConfigDataset \"\"\" _validation",
"time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression to",
"): super(QueryAggregation, self).__init__(**kwargs) self.name = name self.function = function class",
"accumulated: Optional[Union[str, \"AccumulatedType\"]] = None, metric: Optional[Union[str, \"MetricType\"]] = None,",
"timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period for",
"or_property: Optional[List[\"QueryFilter\"]] = None, not_property: Optional[\"QueryFilter\"] = None, dimension: Optional[\"QueryComparisonExpression\"]",
"{ 'value': {'readonly': True}, } _attribute_map = { 'value': {'key':",
"data in the report. The configuration will be ignored if",
"super(CommonExportProperties, self).__init__(**kwargs) self.format = format self.delivery_info = delivery_info self.definition =",
"The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter :param dimension: Has",
"description: Alert description. :type description: str :param source: Source of",
"end date for pulling data for the query. All required",
"of the export's schedule. If 'Inactive', the export's schedule is",
"has the most recent execution history for the export. :type",
"name: str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]] = None, **kwargs ): super(ReportConfigSorting,",
"is paused. Possible values include: \"Active\", \"Inactive\". :type status: str",
"= None, grouping: Optional[List[\"QueryGrouping\"]] = None, filter: Optional[\"QueryFilter\"] = None,",
"use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilter \"\"\" _validation =",
"'type': 'str'}, 'amount': {'key': 'amount', 'type': 'float'}, 'unit': {'key': 'unit',",
"for pulling data for the report. :type time_period: ~azure.mgmt.costmanagement.models.ReportConfigTimePeriod :param",
"'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type':",
"aggregation: dict[str, ~azure.mgmt.costmanagement.models.ReportConfigAggregation] :param grouping: Array of group by expression",
"= { 'type': {'key': 'type', 'type': 'str'}, 'category': {'key': 'category',",
"export's schedule is paused. Possible values include: \"Active\", \"Inactive\". :type",
"dateTime in which alert was closed. :type close_time: str :param",
"this field will be used to determine whether the user",
":vartype value: list[~azure.mgmt.costmanagement.models.View] :ivar next_link: The link (url) to the",
":param to: Required. The end date to pull data to.",
"Optional[\"QueryDatasetConfiguration\"] = None, aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, grouping: Optional[List[\"QueryGrouping\"]]",
":vartype value: list[~azure.mgmt.costmanagement.models.Operation] :ivar next_link: URL to get the next",
"'type': 'str'}, 'scope': {'key': 'properties.scope', 'type': 'str'}, 'created_on': {'key': 'properties.createdOn',",
"): super(ExportDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ExportDefinition(msrest.serialization.Model): \"\"\"The definition",
"= status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The details of the error. Variables",
"None, contact_emails: Optional[List[str]] = None, contact_groups: Optional[List[str]] = None, contact_roles:",
"class Operation(msrest.serialization.Model): \"\"\"A Cost management REST API operation. Variables are",
"an export. All required parameters must be populated in order",
"): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description = description self.source",
"sending a request. :ivar value: List of cost management operations",
"\"\"\" _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'category':",
"\"Budget\", \"Invoice\", \"Credit\", \"Quota\", \"General\", \"xCloud\", \"BudgetForecast\". :type type: str",
"Required. Array of values to use for comparison. :type values:",
"configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use",
"None self.next_link = None class DimensionsListResult(msrest.serialization.Model): \"\"\"Result of listing dimensions.",
"= None, **kwargs ): super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property",
"Possible values include: \"Dimension\", \"TagKey\". :type type: str or ~azure.mgmt.costmanagement.models.PivotType",
"*, time_grain_type: Optional[Union[str, \"AlertTimeGrainType\"]] = None, period_start_date: Optional[str] = None,",
"\"None\", \"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type time_grain_type: str",
"ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export dataset configuration. Allows columns to be selected",
"'grouping', 'type': '[QueryGrouping]'}, 'filter': {'key': 'filter', 'type': 'QueryFilter'}, } def",
"= enabled class Operation(msrest.serialization.Model): \"\"\"A Cost management REST API operation.",
"self.tag = tag class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by expression to",
"{'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs ):",
"provided then the export will include all available columns. :param",
"= aggregation self.grouping = grouping self.sorting = sorting self.filter =",
"a request. :ivar value: List of alerts. :vartype value: list[~azure.mgmt.costmanagement.models.Alert]",
"dictionary is the alias for the aggregated column. Query can",
"provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration :param aggregation: Dictionary of aggregation expression",
"= None, file_name: Optional[str] = None, run_settings: Optional[\"CommonExportProperties\"] = None,",
"= None self.type = None self.tags = None class Alert(Resource):",
"Report can have up to 2 group by clauses. :type",
"to: ~datetime.datetime \"\"\" _validation = { 'from_property': {'required': True}, }",
"self.type = None self.tags = None class Alert(Resource): \"\"\"An individual",
"storage account, you must register the account's subscription with the",
"Resource tags. :vartype tags: dict[str, str] :param execution_type: The type",
"of values to use for comparison. :type values: list[str] \"\"\"",
"class Alert(Resource): \"\"\"An individual alert. Variables are only populated by",
"= None, **kwargs ): super(QueryColumn, self).__init__(**kwargs) self.name = name self.type",
"super(DimensionsListResult, self).__init__(**kwargs) self.value = None class DismissAlertPayload(msrest.serialization.Model): \"\"\"The request payload",
"in the forecast. :param granularity: The granularity of rows in",
"= { 'type': {'required': True}, 'timeframe': {'required': True}, } _attribute_map",
":param display_name: User input name of the view. Required. :type",
"class QueryDefinition(msrest.serialization.Model): \"\"\"The definition of a query. All required parameters",
"'properties.chart', 'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key':",
"class AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts. Variables are only populated by",
"'tags': {'key': 'tags', 'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'},",
"Management scope to save the view on. This includes 'subscriptions/{subscriptionId}'",
"period_start_date: datetime of periodStartDate. :type period_start_date: str :param triggered_by: notificationId",
"'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self, *,",
"the forecast. :param granularity: The granularity of rows in the",
"{'key': 'currentSpend', 'type': 'float'}, 'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups':",
"Possible values include: \"Active\", \"Inactive\". :type status: str or ~azure.mgmt.costmanagement.models.StatusType",
"'str'}, 'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type':",
":vartype filter_enabled: bool :ivar grouping_enabled: Grouping enabled. :vartype grouping_enabled: bool",
"'type': 'AlertPropertiesDetails'}, 'cost_entity_id': {'key': 'properties.costEntityId', 'type': 'str'}, 'status': {'key': 'properties.status',",
"} _attribute_map = { 'value': {'key': 'value', 'type': '[ExportExecution]'}, }",
"} _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource':",
"operator: Union[str, \"OperatorType\"], values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name",
"effect for this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The",
":type tag_filter: object :param threshold: notification threshold percentage as a",
"ignored if aggregation and grouping are provided. :type configuration: ~azure.mgmt.costmanagement.models.ReportConfigDatasetConfiguration",
"of the directory where exports will be uploaded. :type root_folder_path:",
"the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation = { 'delivery_info':",
"Optional[str] = None, **kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition",
"alert. :param type: type of alert. Possible values include: \"Budget\",",
"emails to contact. :type contact_emails: list[str] :param contact_groups: list of",
"to filter by. :type tag_filter: object :param threshold: notification threshold",
"contains a list of available exports in the scope provided.",
"'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'dataset', 'type': 'ReportConfigDatasetAutoGenerated'}, } def __init__(",
"dict[str, str] :param execution_type: The type of the export execution.",
":ivar filter_enabled: Filter enabled. :vartype filter_enabled: bool :ivar grouping_enabled: Grouping",
"data for the query. If custom, then a specific time",
"Optional[List[object]] = None, tag_filter: Optional[object] = None, threshold: Optional[float] =",
"self.to = to class View(ProxyResource): \"\"\"States and configurations of Cost",
"or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation = { 'name': {'required': True}, 'function':",
"{'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, }",
"{'key': 'name', 'type': 'str'}, } def __init__( self, *, type:",
"name of the column to sort. :type name: str \"\"\"",
"): super(ExportDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period",
"\"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]] = None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None,",
"display_name: str :param scope: Cost Management scope to save the",
"self.description = None self.filter_enabled = None self.grouping_enabled = None self.data",
"'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'",
"sorting: Array of order by expression to use in the",
"'type': 'ReportConfigDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping',",
"'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'enabled': {'key': 'enabled',",
"and forecasted data. Actual usage and forecasted data can be",
"self.accumulated = accumulated self.metric = metric self.kpis = kpis self.pivots",
"= None class OperationListResult(msrest.serialization.Model): \"\"\"Result of listing cost management operations.",
"to use in the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter:",
":param id: ID of resource related to metric (budget). :type",
"to use in the query. :type filter: ~azure.mgmt.costmanagement.models.QueryFilter \"\"\" _validation",
"group by expression to be used in the report. All",
"data present in the report. :param granularity: The granularity of",
"The type of the export execution. Possible values include: \"OnDemand\",",
":type unit: str :param current_spend: current spend. :type current_spend: float",
"the dimension. :vartype total: int :ivar category: Dimension category. :vartype",
"= not_property self.dimension = dimension self.tag = tag class ReportConfigFilterAutoGenerated(msrest.serialization.Model):",
"= aggregation self.grouping = grouping self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model):",
"run_history=run_history, **kwargs) self.schedule = schedule class ExportRecurrencePeriod(msrest.serialization.Model): \"\"\"The start and",
"- Request is throttled. Retry after waiting for the time",
":ivar description: Dimension description. :vartype description: str :ivar filter_enabled: Filter",
"None, period_start_date: Optional[str] = None, triggered_by: Optional[str] = None, resource_group_filter:",
"code. :vartype code: str :ivar message: Error message indicating why",
"and end date of the recurrence. The start date must",
"self.filter = filter class QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in",
"= None, **kwargs ): super(ReportConfigDefinition, self).__init__(**kwargs) self.type = type self.timeframe",
"time_period: Optional[\"ReportConfigTimePeriod\"] = None, dataset: Optional[\"ReportConfigDataset\"] = None, **kwargs ):",
"generated by Microsoft (R) AutoRest Code Generator. # Changes may",
"however API users need to register the subscription. For more",
"at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param not_property: The",
"list of available exports in the scope provided. Variables are",
"A set of tags. Resource tags. :vartype tags: dict[str, str]",
"status. Possible values include: \"None\", \"Active\", \"Overridden\", \"Resolved\", \"Dismissed\". :type",
":type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array of rows. :type rows:",
"for pulling data for the report. If custom, then a",
"of alerts results if there are any. :vartype next_link: str",
"def __init__( self, *, e_tag: Optional[str] = None, display_name: Optional[str]",
"schedule: Has schedule information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule",
"): super(QueryDefinition, self).__init__(**kwargs) self.type = type self.timeframe = timeframe self.time_period",
"'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key':",
"{ 'columns': {'key': 'columns', 'type': '[str]'}, } def __init__( self,",
"{'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping':",
"'ReportConfigFilter'}, } def __init__( self, *, granularity: Optional[Union[str, \"ReportGranularityType\"]] =",
"dataset configuration. Allows columns to be selected for the export.",
"'criteria': {'key': 'criteria', 'type': 'str'}, } def __init__( self, *,",
"from typing import Dict, List, Optional, Union from azure.core.exceptions import",
"Any valid query column name is allowed. If not provided,",
"\"MonthToDate\", \"YearToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period:",
"Azure. :param type: Required. The type of the report. Usage",
"Currently only 'Csv' is supported. Possible values include: \"Csv\". :type",
"\"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period:",
"information for the export. :type schedule: ~azure.mgmt.costmanagement.models.ExportSchedule \"\"\" _validation =",
"export. All required parameters must be populated in order to",
"List of cost management operations supported by the Microsoft.CostManagement resource",
"'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'},",
"type of the query. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\".",
"\"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType :param time_period: Has",
"The format of the export being delivered. Currently only 'Csv'",
"processing_end_time self.file_name = file_name self.run_settings = run_settings self.error = error",
"super(Operation, self).__init__(**kwargs) self.name = None self.display = display class OperationDisplay(msrest.serialization.Model):",
"__init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None self.next_link",
"= None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(Export, self).__init__(e_tag=e_tag,",
"): super(ExportDeliveryDestination, self).__init__(**kwargs) self.resource_id = resource_id self.container = container self.root_folder_path",
"message: Error message indicating why the operation failed. :vartype message:",
"{'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type':",
"'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'},",
"None, category: Optional[Union[str, \"AlertCategory\"]] = None, criteria: Optional[Union[str, \"AlertCriteria\"]] =",
"= timeframe self.time_period = time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model):",
"'value', 'type': '[ExportExecution]'}, } def __init__( self, **kwargs ): super(ExportExecutionListResult,",
"\"\"\"The definition of a report config. All required parameters must",
"2}, } _attribute_map = { 'and_property': {'key': 'and', 'type': '[ReportConfigFilterAutoGenerated]'},",
"to. :type contact_groups: list[str] :param contact_roles: list of contact roles.",
"): super(ErrorResponse, self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model): \"\"\"The Resource",
"The type of the export. Note that 'Usage' is equivalent",
"} def __init__( self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value =",
"{'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'}, 'run_history':",
"of action groups to broadcast to. :type contact_groups: list[str] :param",
"__init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name",
"*, granularity: Optional[Union[str, \"ReportGranularityType\"]] = None, configuration: Optional[\"ReportConfigDatasetConfiguration\"] = None,",
"{'key': 'from', 'type': 'iso-8601'}, 'to': {'key': 'to', 'type': 'iso-8601'}, }",
"\"xCloud\", \"BudgetForecast\". :type type: str or ~azure.mgmt.costmanagement.models.AlertType :param category: Alert",
"column to use in comparison. :type name: str :param operator:",
"\"\"\" _validation = { 'from_property': {'required': True}, } _attribute_map =",
"name of the column to use in comparison. :type name:",
"): super(Dimension, self).__init__(**kwargs) self.description = None self.filter_enabled = None self.grouping_enabled",
"get the next set of operation list results if there",
"when the user last modified this view. :vartype modified_on: ~datetime.datetime",
"self.to = to class ExportSchedule(msrest.serialization.Model): \"\"\"The schedule associated with the",
"List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name self.operator =",
"export resource. Variables are only populated by the server, and",
"def __init__( self, **kwargs ): super(OperationListResult, self).__init__(**kwargs) self.value = None",
"__init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ): super(ExportTimePeriod,",
"can vary by customer channel (see examples). :type columns: list[str]",
"value: The list of exports. :vartype value: list[~azure.mgmt.costmanagement.models.Export] \"\"\" _validation",
"a URL link to get the next set of results.",
"True}, 'next_link': {'readonly': True}, } _attribute_map = { 'value': {'key':",
":param data: :type data: list[str] :ivar total: Total number of",
"filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation = { 'grouping': {'max_items': 2, 'min_items':",
"= None, definition: Optional[\"ExportDefinition\"] = None, run_history: Optional[\"ExportExecutionListResult\"] = None,",
"Optional[str] = None, display_name: Optional[str] = None, scope: Optional[str] =",
"in the query. Any valid query column name is allowed.",
"list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression to use in the",
"'type': 'QueryComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'QueryComparisonExpression'}, } def __init__(",
"costs. Possible values include: \"ActualCost\", \"AmortizedCost\", \"AHUB\". :type metric: str",
"columns: Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param rows: Array",
"column name is allowed. If not provided, then report includes",
"Optional[Union[str, \"FormatType\"]] = None, delivery_info: Optional[\"ExportDeliveryInfo\"] = None, definition: Optional[\"ExportDefinition\"]",
"the Azure portal, it is done automatically, however API users",
"): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation",
"status of the export execution. Possible values include: \"Queued\", \"InProgress\",",
"Optional[\"ExportRecurrencePeriod\"] = None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status = status",
"dict[str, str] :ivar description: Dimension description. :vartype description: str :ivar",
"self.meter_filter = meter_filter self.tag_filter = tag_filter self.threshold = threshold self.operator",
"'type': 'ExportDatasetConfiguration'}, } def __init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]]",
"'creation_time': {'key': 'properties.creationTime', 'type': 'str'}, 'close_time': {'key': 'properties.closeTime', 'type': 'str'},",
"View(ProxyResource): \"\"\"States and configurations of Cost Analysis. Variables are only",
"'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type',",
"\"Quota\", \"General\", \"xCloud\", \"BudgetForecast\". :type type: str or ~azure.mgmt.costmanagement.models.AlertType :param",
"or ~azure.mgmt.costmanagement.models.AccumulatedType :param metric: Metric to use when displaying costs.",
":vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation = { 'value': {'readonly': True},",
"name: Optional[str] = None, type: Optional[str] = None, **kwargs ):",
":param resource_group_filter: array of resourceGroups to filter by. :type resource_group_filter:",
"{'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags':",
"{'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'}, } def __init__( self, *, granularity:",
"date for export data. :type to: ~datetime.datetime \"\"\" _validation =",
"Union from azure.core.exceptions import HttpResponseError import msrest.serialization from ._cost_management_client_enums import",
"= { 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe',",
"forecast represents forecasted data and UsageAndForecast represents both usage and",
"the container where exports will be uploaded. :type container: str",
"'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, } def __init__( self,",
"processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str] = None, run_settings: Optional[\"CommonExportProperties\"]",
"= None self.filter_enabled = None self.grouping_enabled = None self.data =",
"import * class Resource(msrest.serialization.Model): \"\"\"The Resource model definition. Variables are",
"'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def __init__(",
"groupings and aggregation. Variables are only populated by the server,",
"self).__init__(**kwargs) self.value = None self.next_link = None class PivotProperties(msrest.serialization.Model): \"\"\"Each",
"QueryGrouping(msrest.serialization.Model): \"\"\"The group by expression to be used in the",
"in the export. This should only be specified with timeFrame",
"format: Optional[Union[str, \"FormatType\"]] = None, delivery_info: Optional[\"ExportDeliveryInfo\"] = None, definition:",
"Optional[str] = None, **kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id = None",
"or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier for the entity that",
"'str'}, } def __init__( self, *, name: str, direction: Optional[Union[str,",
"} _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message':",
"class ExportTimePeriod(msrest.serialization.Model): \"\"\"The date range for data in the export.",
"'operator', 'type': 'str'}, 'amount': {'key': 'amount', 'type': 'float'}, 'unit': {'key':",
"\"Monthly\". :type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration",
"configuration will be ignored if aggregation and grouping are provided.",
"{'key': 'properties.fileName', 'type': 'str'}, 'run_settings': {'key': 'properties.runSettings', 'type': 'CommonExportProperties'}, 'error':",
"order by expression to use in the report. :type sorting:",
"= current_spend self.contact_emails = contact_emails self.contact_groups = contact_groups self.contact_roles =",
"'data_set': {'key': 'dataSet', 'type': 'ExportDataset'}, } def __init__( self, *,",
"{ 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map = {",
"modification_time: str :param status_modification_user_name: :type status_modification_user_name: str :param status_modification_time: dateTime",
"threshold: notification threshold percentage as a decimal which activated this",
"Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end. :vartype",
"show the KPI in the UI?. :type enabled: bool \"\"\"",
"= next_link self.columns = columns self.rows = rows class QueryTimePeriod(msrest.serialization.Model):",
"'name': {'required': True}, 'function': {'required': True}, } _attribute_map = {",
"'type': 'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric',",
"'contact_emails': {'key': 'contactEmails', 'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'},",
"register the account's subscription with the Microsoft.CostManagementExports resource provider. This",
"**kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to",
"if there are any. :vartype next_link: str \"\"\" _validation =",
"'value': {'key': 'value', 'type': '[Export]'}, } def __init__( self, **kwargs",
"export executions. :vartype value: list[~azure.mgmt.costmanagement.models.ExportExecution] \"\"\" _validation = { 'value':",
"directory where exports will be uploaded. :type root_folder_path: str \"\"\"",
":param type: Required. The type of the report. Usage represents",
"Optional[\"QueryTimePeriod\"] = None, dataset: Optional[\"QueryDataset\"] = None, **kwargs ): super(QueryDefinition,",
"\"\"\"The aggregation expression to be used in the query. All",
"= type self.timeframe = timeframe self.time_period = time_period self.dataset =",
"): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type",
"value: The list of dimensions. :vartype value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation",
"to pull data from. :type from_property: ~datetime.datetime :param to: Required.",
"Required. Has type of the column to group. Possible values",
"\"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time",
":param current_spend: current spend. :type current_spend: float :param contact_emails: list",
"'str'}, 'operation': {'key': 'operation', 'type': 'str'}, } def __init__( self,",
"of group by expression to use in the report. Report",
"} def __init__( self, *, error: Optional[\"ErrorDetails\"] = None, **kwargs",
"\"OnDemand\", \"Scheduled\". :type execution_type: str or ~azure.mgmt.costmanagement.models.ExecutionType :param status: The",
"__init__( self, *, type: Union[str, \"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period:",
":type tag: ~azure.mgmt.costmanagement.models.QueryComparisonExpression \"\"\" _validation = { 'and_property': {'min_items': 2},",
"\"ExportType\"], timeframe: Union[str, \"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] = None, data_set: Optional[\"ExportDataset\"]",
"{'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type': 'str'}, 'usage_start':",
"self.display = display class OperationDisplay(msrest.serialization.Model): \"\"\"The object that represents the",
"def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs ):",
"recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs) self.status =",
"'properties.createdOn', 'type': 'iso-8601'}, 'modified_on': {'key': 'properties.modifiedOn', 'type': 'iso-8601'}, 'chart': {'key':",
"report. :param and_property: The logical \"AND\" expression. Must have at",
":type name: str :param function: Required. The name of the",
"self.granularity = granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The export",
"~azure.mgmt.costmanagement.models.ErrorDetails \"\"\" _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetails'},",
"the export. This should only be specified with timeFrame set",
"modification_time: dateTime in which alert was last modified. :type modification_time:",
"Optional[List[object]] = None, meter_filter: Optional[List[object]] = None, tag_filter: Optional[object] =",
"'[ReportConfigFilterAutoGenerated]'}, 'or_property': {'key': 'or', 'type': '[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type':",
"'type', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'enabled': {'key':",
"= { 'and_property': {'key': 'and', 'type': '[QueryFilter]'}, 'or_property': {'key': 'or',",
"'name': {'readonly': True}, 'type': {'readonly': True}, 'created_on': {'readonly': True}, 'modified_on':",
"columns class QueryDefinition(msrest.serialization.Model): \"\"\"The definition of a query. All required",
":type configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to",
"resource_filter: array of resources to filter by. :type resource_filter: list[object]",
"self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model): \"\"\"Each KPI must contain a",
"a 'type' and 'enabled' key. :param type: KPI type (Forecast,",
"{ 'name': {'required': True}, 'function': {'required': True}, } _attribute_map =",
"{ 'error': {'key': 'error', 'type': 'ErrorDetails'}, } def __init__( self,",
"None, processing_end_time: Optional[datetime.datetime] = None, file_name: Optional[str] = None, run_settings:",
"): super(ReportConfigFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property",
"status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier for the",
"= { 'direction': {'key': 'direction', 'type': 'str'}, 'name': {'key': 'name',",
"\"\"\"Each pivot must contain a 'type' and 'name'. :param type:",
"= None self.data = data self.total = None self.category =",
"self.grouping = grouping self.sorting = sorting self.filter = filter class",
"): super(ReportConfigDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class ReportConfigDefinition(msrest.serialization.Model): \"\"\"The definition",
"'usage_end': {'readonly': True}, 'next_link': {'readonly': True}, } _attribute_map = {",
"data for the report. All required parameters must be populated",
"Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param type_properties_query_type: The type of",
"self.category = None self.usage_start = None self.usage_end = None self.next_link",
"list of action groups to broadcast to. :type contact_groups: list[str]",
"\"\"\"A Cost management REST API operation. Variables are only populated",
"configuration: The export dataset configuration. :type configuration: ~azure.mgmt.costmanagement.models.ExportDatasetConfiguration \"\"\" _attribute_map",
"provided. Variables are only populated by the server, and will",
"Resource tags. :vartype tags: dict[str, str] :param definition: defines the",
"to be used in the export. :param and_property: The logical",
"name of the aggregation function to use. Possible values include:",
"self).__init__(**kwargs) self.next_link = next_link self.columns = columns self.rows = rows",
":param from_property: Required. The start date to pull data from.",
"= pivots self.type_properties_query_type = type_properties_query_type self.timeframe = timeframe self.time_period =",
"**kwargs ): super(DismissAlertPayload, self).__init__(**kwargs) self.definition = definition self.description = description",
"report. :param granularity: The granularity of rows in the report.",
"self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration class ExportDatasetConfiguration(msrest.serialization.Model): \"\"\"The",
"'type': '[Dimension]'}, } def __init__( self, **kwargs ): super(DimensionsListResult, self).__init__(**kwargs)",
"tags. Resource tags. :vartype tags: dict[str, str] :param definition: defines",
"next execution time. :vartype next_run_time_estimate: ~datetime.datetime :param schedule: Has schedule",
"timeframe self.time_period = time_period self.dataset = dataset class ViewListResult(msrest.serialization.Model): \"\"\"Result",
":vartype created_on: ~datetime.datetime :ivar modified_on: Date when the user last",
"\"\"\"The start and end date for pulling data for the",
"'type': 'ReportConfigTimePeriod'}, 'dataset': {'key': 'properties.query.dataset', 'type': 'ReportConfigDataset'}, } def __init__(",
"} def __init__( self, *, from_property: datetime.datetime, to: datetime.datetime, **kwargs",
"QueryDefinition(msrest.serialization.Model): \"\"\"The definition of a query. All required parameters must",
"\"\"\"An export resource. Variables are only populated by the server,",
"__init__( self, *, columns: Optional[List[str]] = None, **kwargs ): super(ExportDatasetConfiguration,",
"QueryDatasetConfiguration(msrest.serialization.Model): \"\"\"The configuration of dataset in the query. :param columns:",
"'iso-8601'}, 'schedule': {'key': 'schedule', 'type': 'ExportSchedule'}, } def __init__( self,",
"This is required once per subscription. When creating an export",
"= { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display',",
"destination class ExportExecution(Resource): \"\"\"An export execution. Variables are only populated",
"end date must be greater than start date. :type recurrence_period:",
"forecast. :type dataset: ~azure.mgmt.costmanagement.models.ForecastDataset :param include_actual_cost: a boolean determining if",
"for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate: If the",
"version or not. :type e_tag: str \"\"\" _validation = {",
"self, *, name: Optional[str] = None, type: Optional[str] = None,",
"Optional[Union[str, \"StatusType\"]] = None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None, **kwargs ):",
"decimal which activated this alert. :type threshold: float :param operator:",
"pulling data for the forecast. If custom, then a specific",
":param error: The details of any error. :type error: ~azure.mgmt.costmanagement.models.ErrorDetails",
"\"TimeframeType\"], time_period: Optional[\"ExportTimePeriod\"] = None, data_set: Optional[\"ExportDataset\"] = None, **kwargs",
"Possible values include: \"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.ReportConfigColumnType",
"delivery_info: \"ExportDeliveryInfo\", definition: \"ExportDefinition\", format: Optional[Union[str, \"FormatType\"]] = None, run_history:",
"self, *, type: Union[str, \"ForecastType\"], timeframe: Union[str, \"ForecastTimeframeType\"], time_period: Optional[\"QueryTimePeriod\"]",
"super(ReportConfigFilterAutoGenerated, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property =",
"order to send to Azure. :param name: Required. The name",
"= time_period self.dataset = dataset class ReportConfigFilter(msrest.serialization.Model): \"\"\"The filter expression",
"URL link to get the next set of results. Variables",
"'code': {'readonly': True}, 'message': {'readonly': True}, } _attribute_map = {",
"processing_end_time: The time when the export execution finished. :type processing_end_time:",
"str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The identifier for the entity",
"{'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'description':",
"{'key': 'criteria', 'type': 'str'}, } def __init__( self, *, type:",
"granularity of rows in the report. Possible values include: \"Daily\",",
"{'readonly': True}, 'modified_on': {'readonly': True}, } _attribute_map = { 'id':",
"True}, 'category': {'readonly': True}, 'usage_start': {'readonly': True}, 'usage_end': {'readonly': True},",
"the user created this view. :vartype created_on: ~datetime.datetime :ivar modified_on:",
"to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used in",
"'value': {'key': 'value', 'type': '[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'},",
"be uploaded. :type root_folder_path: str \"\"\" _validation = { 'resource_id':",
"'container', 'type': 'str'}, 'root_folder_path': {'key': 'rootFolderPath', 'type': 'str'}, } def",
"the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\" _validation = { 'grouping':",
"Has filter expression to use in the report. :type filter:",
"'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'},",
"'properties.nextLink', 'type': 'str'}, 'columns': {'key': 'properties.columns', 'type': '[QueryColumn]'}, 'rows': {'key':",
"Optional[Dict[str, \"ReportConfigAggregation\"]] = None, grouping: Optional[List[\"ReportConfigGrouping\"]] = None, sorting: Optional[List[\"ReportConfigSorting\"]]",
"\"Active\", \"Inactive\". :type status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required.",
"None, creation_time: Optional[str] = None, close_time: Optional[str] = None, modification_time:",
"**kwargs ): super(QueryDatasetConfiguration, self).__init__(**kwargs) self.columns = columns class QueryDefinition(msrest.serialization.Model): \"\"\"The",
"forecast. The key of each item in the dictionary is",
"= None, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = None self.display",
"in the report. All required parameters must be populated in",
"id: Resource Id. :vartype id: str :ivar name: Resource name.",
"dataset: Optional[\"ForecastDataset\"] = None, include_actual_cost: Optional[bool] = None, include_fresh_partial_cost: Optional[bool]",
"self.from_property = from_property self.to = to class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation",
"and_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, or_property: Optional[List[\"ReportConfigFilterAutoGenerated\"]] = None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"]",
"list[~azure.mgmt.costmanagement.models.ReportConfigGrouping] :param sorting: Array of order by expression to use",
"may cause incorrect behavior and will be lost if the",
"be included in the report. Any valid report column name",
"resource_id: Required. The resource id of the storage account where",
"for service reservations. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type",
"= None, dataset: Optional[\"ForecastDataset\"] = None, include_actual_cost: Optional[bool] = None,",
"submitted_time: ~datetime.datetime :param processing_start_time: The time when export was picked",
"display_name: Optional[str] = None, scope: Optional[str] = None, chart: Optional[Union[str,",
"time specified in the \"Retry-After\" header. :param error: The details",
"name: Resource name. :vartype name: str :ivar type: Resource type.",
"ExportDefinition(msrest.serialization.Model): \"\"\"The definition of an export. All required parameters must",
"destination: \"ExportDeliveryDestination\", **kwargs ): super(ExportDeliveryInfo, self).__init__(**kwargs) self.destination = destination class",
"\"In\", \"Contains\". :type operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required.",
"the project root for license information. # Code generated by",
"time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod :param dataset: Has definition for data in this",
"uploaded. :type container: str :param root_folder_path: The name of the",
"type: KPI type (Forecast, Budget). Possible values include: \"Forecast\", \"Budget\".",
"delivered. :type resource_id: str :param container: Required. The name of",
"'next_run_time_estimate': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',",
"items. :type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical \"OR\" expression.",
"'aggregation': {'key': 'aggregation', 'type': '{QueryAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[QueryGrouping]'},",
":param granularity: The granularity of rows in the export. Currently",
"order to send to Azure. :param direction: Direction of sort.",
"config. All required parameters must be populated in order to",
"{'readonly': True}, 'type': {'readonly': True}, } _attribute_map = { 'id':",
"Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param",
":type name: str \"\"\" _validation = { 'name': {'required': True},",
"of the export. Note that 'Usage' is equivalent to 'ActualCost'",
"send to Azure. :param from_property: Required. The start date of",
"of listing exports. It contains a list of available exports",
":type e_tag: str :param format: The format of the export",
"start date must be in future. If present, the end",
"actualCost will be included. :type include_actual_cost: bool :param include_fresh_partial_cost: a",
"The time when export was queued to be executed. :type",
"scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope.",
"'str'}, 'accumulated': {'key': 'properties.accumulated', 'type': 'str'}, 'metric': {'key': 'properties.metric', 'type':",
"{'key': 'eTag', 'type': 'str'}, 'format': {'key': 'properties.format', 'type': 'str'}, 'delivery_info':",
"**kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None",
"def __init__( self, **kwargs ): super(ViewListResult, self).__init__(**kwargs) self.value = None",
"source: Source of alert. Possible values include: \"Preset\", \"User\". :type",
"granularity self.configuration = configuration self.aggregation = aggregation self.filter = filter",
"'str'}, } def __init__( self, *, data: Optional[List[str]] = None,",
"2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param or_property: The logical \"OR\"",
"operator self.values = values class ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of data",
"listing views. It contains a list of available views. Variables",
"the export. :type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation = { 'type':",
"'usage_start': {'key': 'properties.usageStart', 'type': 'iso-8601'}, 'usage_end': {'key': 'properties.usageEnd', 'type': 'iso-8601'},",
"Resource name. :vartype name: str :ivar type: Resource type. :vartype",
"= None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None, **kwargs ): super(ExportSchedule, self).__init__(**kwargs)",
"ForecastDataset(msrest.serialization.Model): \"\"\"The definition of data present in the forecast. :param",
"'bool'}, 'include_fresh_partial_cost': {'key': 'includeFreshPartialCost', 'type': 'bool'}, } def __init__( self,",
"Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] =",
"= display class OperationDisplay(msrest.serialization.Model): \"\"\"The object that represents the operation.",
"query. It contains all columns listed under groupings and aggregation.",
"expression to be used in the report. All required parameters",
"super(ReportConfigTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to = to class View(ProxyResource):",
"'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key': 'properties.total', 'type': 'int'},",
"{ 'direction': {'key': 'direction', 'type': 'str'}, 'name': {'key': 'name', 'type':",
"in the query. The key of each item in the",
"= values class QueryDataset(msrest.serialization.Model): \"\"\"The definition of data present in",
"None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type = execution_type self.status =",
"= None, **kwargs ): super(ReportConfigDatasetAutoGenerated, self).__init__(**kwargs) self.granularity = granularity self.configuration",
"Optional[str] = None, status: Optional[Union[str, \"AlertStatus\"]] = None, creation_time: Optional[str]",
"the time specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503 ServiceUnavailable",
"The link (url) to the next page of results. :vartype",
"Array of group by expression to use in the report.",
"name: str :param type: The type of column. :type type:",
"super(ErrorDetails, self).__init__(**kwargs) self.code = None self.message = None class ErrorResponse(msrest.serialization.Model):",
"service reservations. Possible values include: \"Usage\", \"ActualCost\", \"AmortizedCost\". :type type:",
"for export data. :type to: ~datetime.datetime \"\"\" _validation = {",
"**kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property",
"\"OperatorType\"], values: List[str], **kwargs ): super(QueryComparisonExpression, self).__init__(**kwargs) self.name = name",
"export was queued to be executed. :type submitted_time: ~datetime.datetime :param",
"tags. :vartype tags: dict[str, str] :ivar description: Dimension description. :vartype",
"Required. The name of the column to group. This version",
"self.execution_type = execution_type self.status = status self.submitted_by = submitted_by self.submitted_time",
"status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str] = None, **kwargs ):",
"\"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.TimeframeType",
"expression to use in the report. :type filter: ~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated \"\"\"",
":param type: The type of column. :type type: str \"\"\"",
"be ignored when sending a request. :ivar id: Resource Id.",
"cost_entity_id: str :param status: alert status. Possible values include: \"None\",",
"\"General\", \"xCloud\", \"BudgetForecast\". :type type: str or ~azure.mgmt.costmanagement.models.AlertType :param category:",
"listing exports. It contains a list of available exports in",
"to: datetime.datetime, **kwargs ): super(ExportTimePeriod, self).__init__(**kwargs) self.from_property = from_property self.to",
"'type': '[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles',",
"value: list[~azure.mgmt.costmanagement.models.Dimension] \"\"\" _validation = { 'value': {'readonly': True}, }",
"= metric self.kpis = kpis self.pivots = pivots self.type_properties_query_type =",
"Possible values include: \"None\", \"EqualTo\", \"GreaterThan\", \"GreaterThanOrEqualTo\", \"LessThan\", \"LessThanOrEqualTo\". :type",
":type function: str or ~azure.mgmt.costmanagement.models.FunctionType \"\"\" _validation = { 'name':",
"status: Optional[Union[str, \"StatusType\"]] = None, recurrence_period: Optional[\"ExportRecurrencePeriod\"] = None, **kwargs",
"None, close_time: Optional[str] = None, modification_time: Optional[str] = None, status_modification_user_name:",
"type: Required. Has type of the column to group. Possible",
"will be ignored when sending a request. :ivar value: A",
"Must have at least 2 items. :type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param",
"self.rows = rows class QueryTimePeriod(msrest.serialization.Model): \"\"\"The start and end date",
"the latest version or not. :type e_tag: str \"\"\" _validation",
"be in future. If present, the end date must be",
":param name: Required. The name of the column to sort.",
"\"Dimension\", \"TagKey\". :type type: str or ~azure.mgmt.costmanagement.models.PivotType :param name: Data",
"number of data for the dimension. :vartype total: int :ivar",
"{'key': 'timeframe', 'type': 'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset':",
"= None, rows: Optional[List[List[object]]] = None, **kwargs ): super(QueryResult, self).__init__(**kwargs)",
"on dates. Possible values include: \"Usage\". :type type: str or",
"'delivery_info': {'key': 'properties.deliveryInfo', 'type': 'ExportDeliveryInfo'}, 'definition': {'key': 'properties.definition', 'type': 'ExportDefinition'},",
"{ 'type': {'key': 'type', 'type': 'str'}, 'timeframe': {'key': 'timeframe', 'type':",
"this alert. :type triggered_by: str :param resource_group_filter: array of resourceGroups",
"dataset class QueryFilter(msrest.serialization.Model): \"\"\"The filter expression to be used in",
"Cost Management scope to save the view on. This includes",
"_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'name': {'key':",
"None, schedule: Optional[\"ExportSchedule\"] = None, **kwargs ): super(ExportProperties, self).__init__(format=format, delivery_info=delivery_info,",
"It contains a list of available views. Variables are only",
"execution history for the export. :type run_history: ~azure.mgmt.costmanagement.models.ExportExecutionListResult :ivar next_run_time_estimate:",
"'type': 'str'}, } def __init__( self, *, type: Union[str, \"ReportConfigColumnType\"],",
"~azure.mgmt.costmanagement.models.ExportTimePeriod :param data_set: The definition for data in the export.",
"dataset: ~azure.mgmt.costmanagement.models.ReportConfigDatasetAutoGenerated \"\"\" _validation = { 'type': {'required': True}, 'timeframe':",
":type data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation = { 'type': {'required': True},",
"of the aggregation function to use. Possible values include: \"Sum\".",
"or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param",
"Optional[bool] = None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type = type",
"usage_start: Usage start. :vartype usage_start: ~datetime.datetime :ivar usage_end: Usage end.",
"'value': {'key': 'value', 'type': '[Dimension]'}, } def __init__( self, **kwargs",
"'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'creation_time': {'key': 'properties.creationTime',",
"\"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\". :type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param",
"'properties.submittedTime', 'type': 'iso-8601'}, 'processing_start_time': {'key': 'properties.processingStartTime', 'type': 'iso-8601'}, 'processing_end_time': {'key':",
"'sorting': {'key': 'sorting', 'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilterAutoGenerated'},",
"= file_name self.run_settings = run_settings self.error = error class ExportExecutionListResult(msrest.serialization.Model):",
"root_folder_path: The name of the directory where exports will be",
"'[Alert]'}, 'next_link': {'key': 'nextLink', 'type': 'str'}, } def __init__( self,",
"self).__init__(**kwargs) self.granularity = granularity self.configuration = configuration self.aggregation = aggregation",
"of listing views. It contains a list of available views.",
"} _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'function':",
"**kwargs ): super(ProxyResource, self).__init__(**kwargs) self.id = None self.name = None",
"= execution_type self.status = status self.submitted_by = submitted_by self.submitted_time =",
"= None, **kwargs ): super(ExportDataset, self).__init__(**kwargs) self.granularity = granularity self.configuration",
"self.value = None class ExportProperties(CommonExportProperties): \"\"\"The properties of the export.",
"'str'}, 'time_period': {'key': 'timePeriod', 'type': 'QueryTimePeriod'}, 'dataset': {'key': 'dataset', 'type':",
"= None, source: Optional[Union[str, \"AlertSource\"]] = None, details: Optional[\"AlertPropertiesDetails\"] =",
"= columns class ExportDefinition(msrest.serialization.Model): \"\"\"The definition of an export. All",
"'includeFreshPartialCost', 'type': 'bool'}, } def __init__( self, *, type: Union[str,",
"a request. :ivar id: Resource Id. :vartype id: str :ivar",
"\"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date:",
"{'readonly': True}, 'description': {'readonly': True}, 'filter_enabled': {'readonly': True}, 'grouping_enabled': {'readonly':",
"this view. :vartype created_on: ~datetime.datetime :ivar modified_on: Date when the",
"error: Optional[\"ErrorDetails\"] = None, **kwargs ): super(ExportExecution, self).__init__(**kwargs) self.execution_type =",
"'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type': 'ReportConfigComparisonExpression'},",
"list[str] \"\"\" _attribute_map = { 'columns': {'key': 'columns', 'type': '[str]'},",
"scope: Optional[str] = None, chart: Optional[Union[str, \"ChartType\"]] = None, accumulated:",
"'modified_on': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id',",
"cause incorrect behavior and will be lost if the code",
"modification_time: Optional[str] = None, status_modification_user_name: Optional[str] = None, status_modification_time: Optional[str]",
"'tags': {'key': 'tags', 'type': '{str}'}, } def __init__( self, **kwargs",
"dimension self.tag = tag class QueryGrouping(msrest.serialization.Model): \"\"\"The group by expression",
"aggregation: Dictionary of aggregation expression to use in the report.",
"{'key': 'dimension', 'type': 'ReportConfigComparisonExpression'}, 'tag': {'key': 'tag', 'type': 'ReportConfigComparisonExpression'}, }",
"values include: \"Usage\". :type type: str or ~azure.mgmt.costmanagement.models.ReportType :param timeframe:",
"Id. :vartype id: str :ivar name: Resource name. :vartype name:",
"\"QueryAggregation\"]] = None, grouping: Optional[List[\"QueryGrouping\"]] = None, filter: Optional[\"QueryFilter\"] =",
"'type': {'readonly': True}, 'tags': {'readonly': True}, } _attribute_map = {",
"None, operator: Optional[Union[str, \"AlertOperator\"]] = None, amount: Optional[float] = None,",
"self, **kwargs ): super(ExportExecutionListResult, self).__init__(**kwargs) self.value = None class ExportListResult(msrest.serialization.Model):",
"aggregation: Optional[Dict[str, \"QueryAggregation\"]] = None, grouping: Optional[List[\"QueryGrouping\"]] = None, filter:",
"The link (url) to the next page of results. :type",
"Optional[List[\"PivotProperties\"]] = None, type_properties_query_type: Optional[Union[str, \"ReportType\"]] = None, timeframe: Optional[Union[str,",
"details. :type details: ~azure.mgmt.costmanagement.models.AlertPropertiesDetails :param cost_entity_id: related budget. :type cost_entity_id:",
"configuration: ~azure.mgmt.costmanagement.models.QueryDatasetConfiguration :param aggregation: Dictionary of aggregation expression to use",
"None self.schedule = schedule class ExportDataset(msrest.serialization.Model): \"\"\"The definition for data",
"__init__( self, *, name: Optional[str] = None, type: Optional[str] =",
"~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has comparison expression for a tag. :type",
"'Custom'. The maximum date range is 3 months. All required",
"Retry after waiting for the time specified in the \"Retry-After\"",
"'str'}, 'amount': {'key': 'amount', 'type': 'float'}, 'unit': {'key': 'unit', 'type':",
"\"\"\"An export execution. Variables are only populated by the server,",
"str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period for pulling",
"None self.type = None self.tags = None class Alert(Resource): \"\"\"An",
"} def __init__( self, *, type: Optional[Union[str, \"KpiType\"]] = None,",
"of emails to contact. :type contact_emails: list[str] :param contact_groups: list",
"charges or amortization for service reservations. Possible values include: \"Usage\",",
"column to aggregate. :type name: str :param function: Required. The",
"'str'}, 'filter_enabled': {'key': 'properties.filterEnabled', 'type': 'bool'}, 'grouping_enabled': {'key': 'properties.groupingEnabled', 'type':",
"regenerated. # -------------------------------------------------------------------------- import datetime from typing import Dict, List,",
"= { 'name': {'required': True}, 'function': {'required': True}, } _attribute_map",
"self.resource_id = resource_id self.container = container self.root_folder_path = root_folder_path class",
"= display_name self.scope = scope self.created_on = None self.modified_on =",
"AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts. Variables are only populated by the",
"= None, not_property: Optional[\"ReportConfigFilterAutoGenerated\"] = None, dimension: Optional[\"ReportConfigComparisonExpression\"] = None,",
"str or ~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name of the",
"{'key': 'aggregation', 'type': '{ReportConfigAggregation}'}, 'grouping': {'key': 'grouping', 'type': '[ReportConfigGrouping]'}, 'sorting':",
"list[~azure.mgmt.costmanagement.models.ReportConfigFilter] :param not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.ReportConfigFilter",
"settings that were in effect for this execution. :type run_settings:",
"class ReportConfigAggregation(msrest.serialization.Model): \"\"\"The aggregation expression to be used in the",
"} def __init__( self, *, and_property: Optional[List[\"QueryFilter\"]] = None, or_property:",
"\"OperatorType\"], values: List[str], **kwargs ): super(ReportConfigComparisonExpression, self).__init__(**kwargs) self.name = name",
"name of the column to aggregate. :type name: str :param",
"= id self.enabled = enabled class Operation(msrest.serialization.Model): \"\"\"A Cost management",
"2 items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical \"OR\"",
"} def __init__( self, *, name: str, direction: Optional[Union[str, \"ReportConfigSortingDirection\"]]",
"'str'}, } def __init__( self, *, e_tag: Optional[str] = None,",
"None, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model):",
"enabled: Optional[bool] = None, **kwargs ): super(KpiProperties, self).__init__(**kwargs) self.type =",
"__init__( self, *, recurrence: Union[str, \"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]] =",
"~azure.mgmt.costmanagement.models.PivotType :param name: Data field to show in view. :type",
"\"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export",
"and will be ignored when sending a request. :ivar id:",
"Optional[List[\"ReportConfigSorting\"]] = None, filter: Optional[\"ReportConfigFilter\"] = None, **kwargs ): super(ReportConfigDataset,",
"bool :param include_fresh_partial_cost: a boolean determining if FreshPartialCost will be",
"based on dates. Possible values include: \"Usage\". :type type_properties_query_type: str",
"= schedule class ExportDataset(msrest.serialization.Model): \"\"\"The definition for data in the",
"usage and forecasted data. Actual usage and forecasted data can",
"'[ReportConfigFilterAutoGenerated]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilterAutoGenerated'}, 'dimension': {'key': 'dimension', 'type':",
"= { 'from_property': {'required': True}, } _attribute_map = { 'from_property':",
"None self.operation = None class OperationListResult(msrest.serialization.Model): \"\"\"Result of listing cost",
"def __init__( self, *, type: Optional[Union[str, \"AlertType\"]] = None, category:",
"ReportConfigDataset(msrest.serialization.Model): \"\"\"The definition of data present in the report. :param",
"the operation failed. :vartype message: str \"\"\" _validation = {",
"'threshold', 'type': 'float'}, 'operator': {'key': 'operator', 'type': 'str'}, 'amount': {'key':",
"date for pulling data for the query. All required parameters",
"values class QueryDataset(msrest.serialization.Model): \"\"\"The definition of data present in the",
"meter_filter: Optional[List[object]] = None, tag_filter: Optional[object] = None, threshold: Optional[float]",
"columns class ExportDefinition(msrest.serialization.Model): \"\"\"The definition of an export. All required",
"~datetime.datetime :param to: The end date of recurrence. :type to:",
":type timeframe: str or ~azure.mgmt.costmanagement.models.ForecastTimeframeType :param time_period: Has time period",
"expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.QueryComparisonExpression :param tag: Has",
"= None, description: Optional[str] = None, source: Optional[Union[str, \"AlertSource\"]] =",
"str] :param definition: defines the type of alert. :type definition:",
":type status_modification_time: str \"\"\" _attribute_map = { 'definition': {'key': 'properties.definition',",
"current_spend: current spend. :type current_spend: float :param contact_emails: list of",
"Corporation. All rights reserved. # Licensed under the MIT License.",
"{'key': 'granularity', 'type': 'str'}, 'configuration': {'key': 'configuration', 'type': 'QueryDatasetConfiguration'}, 'aggregation':",
"'[str]'}, 'contact_groups': {'key': 'contactGroups', 'type': '[str]'}, 'contact_roles': {'key': 'contactRoles', 'type':",
"granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The export dataset configuration.",
"project root for license information. # Code generated by Microsoft",
"scope self.created_on = None self.modified_on = None self.chart = chart",
"~azure.mgmt.costmanagement.models.QueryDataset \"\"\" _validation = { 'type': {'required': True}, 'timeframe': {'required':",
"of results. Variables are only populated by the server, and",
"the report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression",
"type self.timeframe = timeframe self.time_period = time_period self.data_set = data_set",
"period for pulling data for the query. :type time_period: ~azure.mgmt.costmanagement.models.QueryTimePeriod",
"more information see https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services . All required parameters must be",
"str or ~azure.mgmt.costmanagement.models.AlertCategory :param criteria: Criteria that triggered alert. Possible",
"for this execution. :type run_settings: ~azure.mgmt.costmanagement.models.CommonExportProperties :param error: The details",
"str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for pulling",
"forecasted data. Actual usage and forecasted data can be differentiated",
"clauses. :type grouping: list[~azure.mgmt.costmanagement.models.QueryGrouping] :param filter: Has filter expression to",
"\"Monthly\", \"Quarterly\", \"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type time_grain_type: str or",
"'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs)",
"_attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'category': {'key':",
"*, and_property: Optional[List[\"ReportConfigFilter\"]] = None, or_property: Optional[List[\"ReportConfigFilter\"]] = None, not_property:",
"will be ignored when sending a request. :ivar value: The",
"dictionary is the alias for the aggregated column. Report can",
"= { 'code': {'readonly': True}, 'message': {'readonly': True}, } _attribute_map",
"{'max_items': 2, 'min_items': 0}, } _attribute_map = { 'granularity': {'key':",
"Operation name: {provider}/{resource}/{operation}. :vartype name: str :param display: The object",
"'type': '[ReportConfigSorting]'}, 'filter': {'key': 'filter', 'type': 'ReportConfigFilter'}, } def __init__(",
"\"\"\" _validation = { 'value': {'readonly': True}, } _attribute_map =",
"class ReportConfigDatasetAutoGenerated(msrest.serialization.Model): \"\"\"The definition of data present in the report.",
"specified in the \"x-ms-ratelimit-microsoft.consumption-retry-after\" header. * 503 ServiceUnavailable - Service",
"~azure.mgmt.costmanagement.models.AlertTimeGrainType :param period_start_date: datetime of periodStartDate. :type period_start_date: str :param",
"name: str :ivar type: Resource type. :vartype type: str :param",
"provided. Possible values include: \"MonthToDate\", \"BillingMonthToDate\", \"TheLastMonth\", \"TheLastBillingMonth\", \"WeekToDate\", \"Custom\".",
"None self.resource = None self.operation = None class OperationListResult(msrest.serialization.Model): \"\"\"Result",
"modified_on: Date when the user last modified this view. :vartype",
":param status: alert status. Possible values include: \"None\", \"Active\", \"Overridden\",",
"a export. All required parameters must be populated in order",
"self, *, recurrence: Union[str, \"RecurrenceType\"], status: Optional[Union[str, \"StatusType\"]] = None,",
"__init__( self, *, granularity: Optional[Union[str, \"GranularityType\"]] = None, configuration: Optional[\"ExportDatasetConfiguration\"]",
"\"Annually\", \"BillingMonth\", \"BillingQuarter\", \"BillingAnnual\". :type time_grain_type: str or ~azure.mgmt.costmanagement.models.AlertTimeGrainType :param",
"\"NewDataNotAvailable\", \"DataNotAvailable\". :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param submitted_by: The",
"status: alert status. Possible values include: \"None\", \"Active\", \"Overridden\", \"Resolved\",",
"be used in the query. All required parameters must be",
"**kwargs ): super(AlertPropertiesDefinition, self).__init__(**kwargs) self.type = type self.category = category",
"clauses. :type aggregation: dict[str, ~azure.mgmt.costmanagement.models.QueryAggregation] :param grouping: Array of group",
"~azure.mgmt.costmanagement.models.KpiType :param id: ID of resource related to metric (budget).",
"= dataset self.include_actual_cost = include_actual_cost self.include_fresh_partial_cost = include_fresh_partial_cost class KpiProperties(msrest.serialization.Model):",
"the latest version or not. :type e_tag: str :param display_name:",
"'[str]'}, 'total': {'key': 'properties.total', 'type': 'int'}, 'category': {'key': 'properties.category', 'type':",
"_attribute_map = { 'granularity': {'key': 'granularity', 'type': 'str'}, 'configuration': {'key':",
"status_modification_user_name self.status_modification_time = status_modification_time class ErrorDetails(msrest.serialization.Model): \"\"\"The details of the",
"will be ignored when sending a request. :ivar name: Operation",
"timeframe: str or ~azure.mgmt.costmanagement.models.ReportTimeframeType :param time_period: Has time period for",
"= None class Alert(Resource): \"\"\"An individual alert. Variables are only",
"{'readonly': True}, 'created_on': {'readonly': True}, 'modified_on': {'readonly': True}, } _attribute_map",
"): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property = or_property self.not_property",
"\"Failed\", \"Timeout\", \"NewDataNotAvailable\", \"DataNotAvailable\". :type status: str or ~azure.mgmt.costmanagement.models.ExecutionStatus :param",
"~azure.mgmt.costmanagement.models.ReportConfigSortingDirection :param name: Required. The name of the column to",
"= configuration self.aggregation = aggregation self.grouping = grouping self.filter =",
":type or_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical \"NOT\" expression. :type",
"'type': '{str}'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'filter_enabled': {'key': 'properties.filterEnabled',",
"{ 'time_grain_type': {'key': 'timeGrainType', 'type': 'str'}, 'period_start_date': {'key': 'periodStartDate', 'type':",
"the export execution. Possible values include: \"Queued\", \"InProgress\", \"Completed\", \"Failed\",",
"not provided, then query includes all columns. :type columns: list[str]",
"self.processing_start_time = processing_start_time self.processing_end_time = processing_end_time self.file_name = file_name self.run_settings",
"License.txt in the project root for license information. # Code",
"'properties.nextLink', 'type': 'str'}, } def __init__( self, *, data: Optional[List[str]]",
"of data present in the forecast. :param granularity: The granularity",
"Generator. # Changes may cause incorrect behavior and will be",
"str :ivar resource: Resource on which the operation is performed:",
":type and_property: list[~azure.mgmt.costmanagement.models.QueryFilter] :param or_property: The logical \"OR\" expression. Must",
"date. :type recurrence_period: ~azure.mgmt.costmanagement.models.ExportRecurrencePeriod \"\"\" _validation = { 'recurrence': {'required':",
"'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type': 'ExportDeliveryInfo'},",
":ivar next_link: The link (url) to the next page of",
"once per subscription. When creating an export in the Azure",
"\"\"\" _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'id':",
"for the export being delivered. :type destination: ~azure.mgmt.costmanagement.models.ExportDeliveryDestination \"\"\" _validation",
"will be lost if the code is regenerated. # --------------------------------------------------------------------------",
"report. The key of each item in the dictionary is",
"\"Preset\", \"User\". :type source: str or ~azure.mgmt.costmanagement.models.AlertSource :param details: Alert",
"'[str]'}, 'contact_roles': {'key': 'contactRoles', 'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type':",
"None class PivotProperties(msrest.serialization.Model): \"\"\"Each pivot must contain a 'type' and",
"data_set: ~azure.mgmt.costmanagement.models.ExportDataset \"\"\" _validation = { 'type': {'required': True}, 'timeframe':",
"None self.type = None self.e_tag = e_tag class Export(ProxyResource): \"\"\"An",
"included in the report. Any valid report column name is",
"**kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = error class ProxyResource(msrest.serialization.Model): \"\"\"The",
"'type' and 'name'. :param type: Data type to show in",
"'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags',",
"= contact_roles self.overriding_alert = overriding_alert class AlertsResult(msrest.serialization.Model): \"\"\"Result of alerts.",
"contain a 'type' and 'name'. :param type: Data type to",
"\"\"\"The start and end date for recurrence schedule. All required",
"None, **kwargs ): super(QueryFilter, self).__init__(**kwargs) self.and_property = and_property self.or_property =",
"alert was last modified. :type modification_time: str :param status_modification_user_name: :type",
"None, resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]] = None, meter_filter:",
"'type': '[str]'}, 'overriding_alert': {'key': 'overridingAlert', 'type': 'str'}, } def __init__(",
"= definition self.run_history = run_history self.next_run_time_estimate = None class Dimension(Resource):",
"list[~azure.mgmt.costmanagement.models.QueryFilter] :param not_property: The logical \"NOT\" expression. :type not_property: ~azure.mgmt.costmanagement.models.QueryFilter",
"sub-views in the Cost Analysis UI. :type pivots: list[~azure.mgmt.costmanagement.models.PivotProperties] :param",
"reserved. # Licensed under the MIT License. See License.txt in",
"'type': {'key': 'type', 'type': 'str'}, 'category': {'key': 'category', 'type': 'str'},",
"'or_property': {'min_items': 2}, } _attribute_map = { 'and_property': {'key': 'and',",
"\"\"\"The comparison expression to be used in the query. All",
"are only populated by the server, and will be ignored",
"= chart self.accumulated = accumulated self.metric = metric self.kpis =",
"the \"Retry-After\" header. :param error: The details of the error.",
"): super(ExportSchedule, self).__init__(**kwargs) self.status = status self.recurrence = recurrence self.recurrence_period",
"True}, 'name': {'readonly': True}, 'type': {'readonly': True}, } _attribute_map =",
"str :param columns: Array of columns. :type columns: list[~azure.mgmt.costmanagement.models.QueryColumn] :param",
"and UsageAndForecast represents both usage and forecasted data. Actual usage",
"{ 'format': {'key': 'format', 'type': 'str'}, 'delivery_info': {'key': 'deliveryInfo', 'type':",
"report. :type sorting: list[~azure.mgmt.costmanagement.models.ReportConfigSorting] :param filter: Has filter expression to",
"str or ~azure.mgmt.costmanagement.models.FormatType :param delivery_info: Has delivery information for the",
"the report. All required parameters must be populated in order",
"} def __init__( self, *, name: Optional[str] = None, type:",
"{'key': 'or', 'type': '[ReportConfigFilter]'}, 'not_property': {'key': 'not', 'type': 'ReportConfigFilter'}, 'dimension':",
"'properties.groupingEnabled', 'type': 'bool'}, 'data': {'key': 'properties.data', 'type': '[str]'}, 'total': {'key':",
"to show in Cost Analysis UI. :type kpis: list[~azure.mgmt.costmanagement.models.KpiProperties] :param",
"= None self.message = None class ErrorResponse(msrest.serialization.Model): \"\"\"Error response indicates",
"The type of column. :type type: str \"\"\" _attribute_map =",
"datetime from typing import Dict, List, Optional, Union from azure.core.exceptions",
"schedule associated with the export. All required parameters must be",
"operator: str or ~azure.mgmt.costmanagement.models.OperatorType :param values: Required. Array of values",
"self).__init__(**kwargs) self.time_grain_type = time_grain_type self.period_start_date = period_start_date self.triggered_by = triggered_by",
"include: \"Tag\", \"Dimension\". :type type: str or ~azure.mgmt.costmanagement.models.QueryColumnType :param name:",
"status_modification_time class AlertPropertiesDefinition(msrest.serialization.Model): \"\"\"defines the type of alert. :param type:",
"object that represents the operation. Variables are only populated by",
":type contact_emails: list[str] :param contact_groups: list of action groups to",
"status: The last known status of the export execution. Possible",
"items. :type and_property: list[~azure.mgmt.costmanagement.models.ReportConfigFilterAutoGenerated] :param or_property: The logical \"OR\" expression.",
"self).__init__(**kwargs) self.type = type self.name = name class ReportConfigSorting(msrest.serialization.Model): \"\"\"The",
"class ReportConfigGrouping(msrest.serialization.Model): \"\"\"The group by expression to be used in",
"to: Optional[datetime.datetime] = None, **kwargs ): super(ExportRecurrencePeriod, self).__init__(**kwargs) self.from_property =",
"'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def",
"of rows in the query. Possible values include: \"Daily\". :type",
"'filter', 'type': 'ReportConfigFilter'}, } def __init__( self, *, granularity: Optional[Union[str,",
":type granularity: str or ~azure.mgmt.costmanagement.models.ReportGranularityType :param configuration: Has configuration information",
"= None, resource_group_filter: Optional[List[object]] = None, resource_filter: Optional[List[object]] = None,",
"'/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. :type",
"self.timeframe = timeframe self.time_period = time_period self.dataset = dataset class",
"'type': 'str'}, 'status': {'key': 'properties.status', 'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy',",
"name self.operator = operator self.values = values class QueryDataset(msrest.serialization.Model): \"\"\"The",
"None, contact_groups: Optional[List[str]] = None, contact_roles: Optional[List[str]] = None, overriding_alert:",
"include: \"Daily\". :type granularity: str or ~azure.mgmt.costmanagement.models.GranularityType :param configuration: The",
"status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence: Required. The schedule recurrence.",
"to get the next set of alerts results if there",
"'type': 'str'}, 'submitted_by': {'key': 'properties.submittedBy', 'type': 'str'}, 'submitted_time': {'key': 'properties.submittedTime',",
"or ~azure.mgmt.costmanagement.models.ReportConfigColumnType :param name: Required. The name of the column",
"provided, then query includes all columns. :type columns: list[str] \"\"\"",
"include: \"Active\", \"Inactive\". :type status: str or ~azure.mgmt.costmanagement.models.StatusType :param recurrence:",
":param columns: Array of column names to be included in",
"= None class Dimension(Resource): \"\"\"Dimension. Variables are only populated by",
"'type': 'str'}, } def __init__( self, *, type: Optional[Union[str, \"PivotType\"]]",
"dimension: Has comparison expression for a dimension. :type dimension: ~azure.mgmt.costmanagement.models.ReportConfigComparisonExpression",
"'type': 'CommonExportProperties'}, 'error': {'key': 'properties.error', 'type': 'ErrorDetails'}, } def __init__(",
"datetime.datetime, to: datetime.datetime, **kwargs ): super(QueryTimePeriod, self).__init__(**kwargs) self.from_property = from_property",
"= { 'grouping': {'max_items': 2, 'min_items': 0}, } _attribute_map ="
] |
[
"Roundtripping data through the compressor works correctly. \"\"\" with open(simple_compressed_file[0],",
"52}, {\"quality\": 52}, {\"lgwin\": 52}, {\"lgblock\": 52}, ] ) @pytest.mark.parametrize(\"exception_cls\",",
"import brotli import pytest from hypothesis import given from hypothesis.strategies",
"max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality,",
"lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size,",
"), ) def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\"",
"== f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(),",
"52}, {\"lgwin\": 52}, {\"lgblock\": 52}, ] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error])",
"mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f:",
"next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb')",
"test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm that the",
"compression of single chunks. \"\"\" import brotli import pytest from",
"brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d =",
"uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given(",
"s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed",
"integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin,",
"integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data through the",
"def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s,",
"def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm that",
"as f: uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) ==",
"with open(one_compressed_file, 'rb') as f: while True: next_data = f.read(chunk_size)",
"def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary)",
") def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm",
"@pytest.mark.parametrize( \"params\", [ {\"mode\": 52}, {\"quality\": 52}, {\"lgwin\": 52}, {\"lgblock\":",
"mode, quality, lgwin, lgblock): \"\"\" Confirm that the streaming compressor",
"c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file,",
"test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed",
"compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock",
"compressed = brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed ==",
"\"\"\" import brotli import pytest from hypothesis import given from",
"brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed == f.read()",
"'rb') as f: uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data) )",
"not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with",
"expected, including flushes after each chunk. \"\"\" compressed_chunks = []",
"the streaming compressor works as expected, including flushes after each",
"[ {\"mode\": 52}, {\"quality\": 52}, {\"lgwin\": 52}, {\"lgblock\": 52}, ]",
"assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d",
"decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed",
"chunks. \"\"\" import brotli import pytest from hypothesis import given",
"== uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24),",
"chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0),",
"lgwin, lgblock): \"\"\" Confirm that the streaming compressor works as",
"{\"lgwin\": 52}, {\"lgblock\": 52}, ] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error]) def",
"not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file,",
"as f: assert decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert",
"\"\"\" Confirm that the streaming compressor works as expected. \"\"\"",
"test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary):",
"sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data through the compressor",
"= [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock )",
"decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10,",
"True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish())",
"mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24)",
"correctly. \"\"\" with open(simple_compressed_file[0], 'rb') as f: uncompressed_data = f.read()",
"@given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary()) def",
"hypothesis.strategies import binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping",
"with open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given(binary())",
"works as expected. \"\"\" compressed_chunks = [] c = brotli.Compressor(",
"# -*- coding: utf-8 -*- \"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for",
"s @pytest.mark.parametrize( \"params\", [ {\"mode\": 52}, {\"quality\": 52}, {\"lgwin\": 52},",
"import pytest from hypothesis import given from hypothesis.strategies import binary,",
"including flushes after each chunk. \"\"\" compressed_chunks = [] c",
"compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f:",
"== f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24),",
"f: uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data",
"the streaming compressor works as expected. \"\"\" compressed_chunks = []",
"-*- coding: utf-8 -*- \"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression",
"decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s",
"streaming compressor works as expected, including flushes after each chunk.",
"max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file,",
"integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin,",
"lgblock): \"\"\" Confirm that the streaming compressor works as expected.",
"flushes after each chunk. \"\"\" compressed_chunks = [] c =",
"pytest from hypothesis import given from hypothesis.strategies import binary, integers,",
"assert decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) ==",
"52}, ] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params, exception_cls): with",
") == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10,",
"assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)),",
"\"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single chunks. \"\"\"",
"from hypothesis import given from hypothesis.strategies import binary, integers, sampled_from,",
"each chunk. \"\"\" compressed_chunks = [] c = brotli.Compressor( mode=mode,",
"lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while True:",
"import binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data",
"Confirm that the streaming compressor works as expected, including flushes",
"with open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given(",
"brotli import pytest from hypothesis import given from hypothesis.strategies import",
"\"\"\" Roundtripping data through the compressor works correctly. \"\"\" with",
"expected. \"\"\" compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality,",
"-*- \"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single chunks.",
"{\"quality\": 52}, {\"lgwin\": 52}, {\"lgblock\": 52}, ] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error,",
"next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file,",
"compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as",
"of single chunks. \"\"\" import brotli import pytest from hypothesis",
"as f: assert decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)),",
"brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11),",
"lgblock): \"\"\" Confirm that the streaming compressor works as expected,",
"Confirm that the streaming compressor works as expected. \"\"\" compressed_chunks",
"d = brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed)",
"= brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed == s",
"= d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize( \"params\", [ {\"mode\":",
"lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size,",
"quality, lgwin, lgblock): \"\"\" Confirm that the streaming compressor works",
"brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as",
"compressor works as expected. \"\"\" compressed_chunks = [] c =",
"test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm that the",
"from hypothesis.strategies import binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\"",
"after each chunk. \"\"\" compressed_chunks = [] c = brotli.Compressor(",
"{\"lgblock\": 52}, ] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params, exception_cls):",
"break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb')",
"~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single chunks. \"\"\" import brotli",
"True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush())",
"f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed =",
"lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def",
"max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size, mode, quality,",
"= brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb')",
"] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params, exception_cls): with pytest.raises(exception_cls):",
"test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data through the compressor works correctly. \"\"\"",
"as expected, including flushes after each chunk. \"\"\" compressed_chunks =",
"assert decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11),",
"[] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock ) with",
"next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed",
"uncompressed = d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize( \"params\", [",
"max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock):",
"integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode,",
"test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single chunks. \"\"\" import",
"\"\"\" compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality, lgwin=lgwin,",
"f: assert decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s))",
"@given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0,",
"works as expected, including flushes after each chunk. \"\"\" compressed_chunks",
"works correctly. \"\"\" with open(simple_compressed_file[0], 'rb') as f: uncompressed_data =",
"as f: while True: next_data = f.read(chunk_size) if not next_data:",
"= f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed =",
"quality=quality, lgwin=lgwin, lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while",
"open(simple_compressed_file[0], 'rb') as f: uncompressed_data = f.read() assert brotli.decompress( brotli.compress(uncompressed_data)",
"f: assert decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0,",
"compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f:",
") @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params, exception_cls): with pytest.raises(exception_cls): brotli.Compressor(**params)",
"= brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert",
"integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression(one_compressed_file, chunk_size, mode,",
"52}, {\"lgblock\": 52}, ] ) @pytest.mark.parametrize(\"exception_cls\", [brotli.Error, brotli.error]) def test_bad_compressor_parameters(params,",
"quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ),",
"f.read() @given(binary()) def test_compressed_data_roundtrips(s): assert brotli.decompress(brotli.compress(s)) == s @given(binary(), binary())",
"given from hypothesis.strategies import binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file):",
"'rb') as f: while True: next_data = f.read(chunk_size) if not",
"f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks))",
"while True: next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data))",
"), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\"",
"binary()) def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed = brotli.compress(s,",
"f.read() @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of(",
"coding: utf-8 -*- \"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of",
"'rb') as f: assert decompressed == f.read() @given(binary()) def test_compressed_data_roundtrips(s):",
"assert uncompressed == s @pytest.mark.parametrize( \"params\", [ {\"mode\": 52}, {\"quality\":",
"import given from hypothesis.strategies import binary, integers, sampled_from, one_of def",
"the compressor works correctly. \"\"\" with open(simple_compressed_file[0], 'rb') as f:",
"lgblock=lgblock ) with open(one_compressed_file, 'rb') as f: while True: next_data",
"if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks))",
"that the streaming compressor works as expected, including flushes after",
"brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize(",
"max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), )",
"utf-8 -*- \"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests for compression of single",
"open(one_compressed_file, 'rb') as f: while True: next_data = f.read(chunk_size) if",
"= brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert decompressed ==",
"d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize( \"params\", [ {\"mode\": 52},",
"Tests for compression of single chunks. \"\"\" import brotli import",
"as expected. \"\"\" compressed_chunks = [] c = brotli.Compressor( mode=mode,",
"data through the compressor works correctly. \"\"\" with open(simple_compressed_file[0], 'rb')",
"<reponame>jayvdb/brotlipy # -*- coding: utf-8 -*- \"\"\" test_simple_compression ~~~~~~~~~~~~~~~~~~~~~~~~~ Tests",
"@given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary) compressed =",
"\"\"\" Confirm that the streaming compressor works as expected, including",
"single chunks. \"\"\" import brotli import pytest from hypothesis import",
"\"\"\" with open(simple_compressed_file[0], 'rb') as f: uncompressed_data = f.read() assert",
"streaming compressor works as expected. \"\"\" compressed_chunks = [] c",
"max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16,",
"f: while True: next_data = f.read(chunk_size) if not next_data: break",
"through the compressor works correctly. \"\"\" with open(simple_compressed_file[0], 'rb') as",
"for compression of single chunks. \"\"\" import brotli import pytest",
"chunk. \"\"\" compressed_chunks = [] c = brotli.Compressor( mode=mode, quality=quality,",
") def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm",
"with open(simple_compressed_file[0], 'rb') as f: uncompressed_data = f.read() assert brotli.decompress(",
"that the streaming compressor works as expected. \"\"\" compressed_chunks =",
"if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with",
"'rb') as f: assert decompressed == f.read() @given( chunk_size=integers(min_value=1, max_value=2**12),",
"\"params\", [ {\"mode\": 52}, {\"quality\": 52}, {\"lgwin\": 52}, {\"lgblock\": 52},",
"compressor works as expected, including flushes after each chunk. \"\"\"",
"== s @given(binary(), binary()) def test_compressed_data_with_dictionaries(s, dictionary): d = brotli.Decompressor(dictionary)",
"max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock):",
"== s @pytest.mark.parametrize( \"params\", [ {\"mode\": 52}, {\"quality\": 52}, {\"lgwin\":",
"uncompressed == s @pytest.mark.parametrize( \"params\", [ {\"mode\": 52}, {\"quality\": 52},",
"uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of(",
"brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed",
"f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12),",
"break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as",
"chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm that the streaming",
"= f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish()) decompressed",
"dictionary=dictionary) uncompressed = d.decompress(compressed) assert uncompressed == s @pytest.mark.parametrize( \"params\",",
"compressor works correctly. \"\"\" with open(simple_compressed_file[0], 'rb') as f: uncompressed_data",
"hypothesis import given from hypothesis.strategies import binary, integers, sampled_from, one_of",
"max_value=24), lgblock=one_of( integers(min_value=0, max_value=0), integers(min_value=16, max_value=24) ), ) def test_streaming_compression_flush(one_compressed_file,",
"next_data = f.read(chunk_size) if not next_data: break compressed_chunks.append(c.compress(next_data)) compressed_chunks.append(c.flush()) compressed_chunks.append(c.finish())",
") with open(one_compressed_file, 'rb') as f: while True: next_data =",
"def test_streaming_compression(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock): \"\"\" Confirm that",
"open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given( chunk_size=integers(min_value=1,",
"def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data through the compressor works correctly.",
"binary, integers, sampled_from, one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data through",
"one_of def test_roundtrip_compression_with_files(simple_compressed_file): \"\"\" Roundtripping data through the compressor works",
"open(one_compressed_file, 'rb') as f: assert decompressed == f.read() @given(binary()) def",
"{\"mode\": 52}, {\"quality\": 52}, {\"lgwin\": 52}, {\"lgblock\": 52}, ] )",
"compressed_chunks.append(c.finish()) decompressed = brotli.decompress(b''.join(compressed_chunks)) with open(one_compressed_file, 'rb') as f: assert",
"dictionary): d = brotli.Decompressor(dictionary) compressed = brotli.compress(s, dictionary=dictionary) uncompressed =",
"brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1, max_value=2**12), mode=sampled_from(list(brotli.BrotliEncoderMode)), quality=integers(min_value=0,",
"= f.read() assert brotli.decompress( brotli.compress(uncompressed_data) ) == uncompressed_data @given( chunk_size=integers(min_value=1,"
] |
[
"low self.avg = avg self.vol = vol self.vol_cur = vol_cur",
"def avg(self, value: float): self._avg = Decimal(value) @property def vol(self)",
"= Decimal(value) @property def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter",
"self._avg = Decimal(value) @property def vol(self) -> Decimal: return self._vol",
"int: return self._updated @updated.setter def updated(self, value: int): self._updated =",
"return self._vol @vol.setter def vol(self, value: float): self._vol = Decimal(value)",
"def sell(self) -> Decimal: return self._sell @sell.setter def sell(self, value:",
"<filename>wexapi/models/ticker.py from decimal import Decimal class Ticker(object): def __init__( self,",
"return self._high @high.setter def high(self, value: float): self._high = Decimal(value)",
"self._buy @buy.setter def buy(self, value: float): self._buy = Decimal(value) @property",
"@vol_cur.setter def vol_cur(self, value: float): self._vol_cur = Decimal(value) @property def",
"return self._sell @sell.setter def sell(self, value: float): self._sell = Decimal(value)",
"def vol(self, value: float): self._vol = Decimal(value) @property def vol_cur(self)",
"value: float): self._buy = Decimal(value) @property def sell(self) -> Decimal:",
"last: float, buy: float, sell: float, updated: int, ): self.high",
"Decimal(value) @property def low(self) -> Decimal: return self._low @low.setter def",
"Decimal: return self._buy @buy.setter def buy(self, value: float): self._buy =",
"@sell.setter def sell(self, value: float): self._sell = Decimal(value) @property def",
"float, vol_cur: int, last: float, buy: float, sell: float, updated:",
"self._high = Decimal(value) @property def low(self) -> Decimal: return self._low",
"= Decimal(value) @property def last(self) -> Decimal: return self._last @last.setter",
"Decimal(value) @property def updated(self) -> int: return self._updated @updated.setter def",
"float, sell: float, updated: int, ): self.high = high self.low",
"self._vol_cur = Decimal(value) @property def last(self) -> Decimal: return self._last",
"@property def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self,",
"= high self.low = low self.avg = avg self.vol =",
"self.low = low self.avg = avg self.vol = vol self.vol_cur",
"avg(self, value: float): self._avg = Decimal(value) @property def vol(self) ->",
"= Decimal(value) @property def buy(self) -> Decimal: return self._buy @buy.setter",
"Decimal(value) @property def sell(self) -> Decimal: return self._sell @sell.setter def",
"float, low: float, avg: float, vol: float, vol_cur: int, last:",
"last(self) -> Decimal: return self._last @last.setter def last(self, value: float):",
"sell(self, value: float): self._sell = Decimal(value) @property def updated(self) ->",
"= avg self.vol = vol self.vol_cur = vol_cur self.last =",
"Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur =",
"high(self) -> Decimal: return self._high @high.setter def high(self, value: float):",
"self._vol @vol.setter def vol(self, value: float): self._vol = Decimal(value) @property",
"@property def avg(self) -> Decimal: return self._avg @avg.setter def avg(self,",
"vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value: float):",
"updated: int, ): self.high = high self.low = low self.avg",
"last self.buy = buy self.sell = sell self.updated = updated",
"def vol(self) -> Decimal: return self._vol @vol.setter def vol(self, value:",
"= last self.buy = buy self.sell = sell self.updated =",
"= low self.avg = avg self.vol = vol self.vol_cur =",
"@property def last(self) -> Decimal: return self._last @last.setter def last(self,",
"high(self, value: float): self._high = Decimal(value) @property def low(self) ->",
"Decimal(value) @property def avg(self) -> Decimal: return self._avg @avg.setter def",
"self.vol_cur = vol_cur self.last = last self.buy = buy self.sell",
"def high(self) -> Decimal: return self._high @high.setter def high(self, value:",
"self._avg @avg.setter def avg(self, value: float): self._avg = Decimal(value) @property",
"return self._last @last.setter def last(self, value: float): self._last = Decimal(value)",
"@buy.setter def buy(self, value: float): self._buy = Decimal(value) @property def",
"@high.setter def high(self, value: float): self._high = Decimal(value) @property def",
"updated(self) -> int: return self._updated @updated.setter def updated(self, value: int):",
"@vol.setter def vol(self, value: float): self._vol = Decimal(value) @property def",
"-> Decimal: return self._buy @buy.setter def buy(self, value: float): self._buy",
"= Decimal(value) @property def sell(self) -> Decimal: return self._sell @sell.setter",
"def low(self, value: float): self._low = Decimal(value) @property def avg(self)",
"@last.setter def last(self, value: float): self._last = Decimal(value) @property def",
"self._sell @sell.setter def sell(self, value: float): self._sell = Decimal(value) @property",
"return self._buy @buy.setter def buy(self, value: float): self._buy = Decimal(value)",
"= vol self.vol_cur = vol_cur self.last = last self.buy =",
"float): self._high = Decimal(value) @property def low(self) -> Decimal: return",
"float): self._vol = Decimal(value) @property def vol_cur(self) -> Decimal: return",
"Decimal(value) @property def buy(self) -> Decimal: return self._buy @buy.setter def",
"high: float, low: float, avg: float, vol: float, vol_cur: int,",
"-> Decimal: return self._vol @vol.setter def vol(self, value: float): self._vol",
"@property def low(self) -> Decimal: return self._low @low.setter def low(self,",
"= sell self.updated = updated @property def high(self) -> Decimal:",
"@property def buy(self) -> Decimal: return self._buy @buy.setter def buy(self,",
"avg(self) -> Decimal: return self._avg @avg.setter def avg(self, value: float):",
"def high(self, value: float): self._high = Decimal(value) @property def low(self)",
"value: float): self._sell = Decimal(value) @property def updated(self) -> int:",
"vol(self) -> Decimal: return self._vol @vol.setter def vol(self, value: float):",
"= Decimal(value) @property def updated(self) -> int: return self._updated @updated.setter",
"Ticker(object): def __init__( self, high: float, low: float, avg: float,",
"vol_cur(self, value: float): self._vol_cur = Decimal(value) @property def last(self) ->",
"return self._low @low.setter def low(self, value: float): self._low = Decimal(value)",
"import Decimal class Ticker(object): def __init__( self, high: float, low:",
"Decimal: return self._low @low.setter def low(self, value: float): self._low =",
"self.last = last self.buy = buy self.sell = sell self.updated",
"Decimal: return self._sell @sell.setter def sell(self, value: float): self._sell =",
"return self._avg @avg.setter def avg(self, value: float): self._avg = Decimal(value)",
"float): self._sell = Decimal(value) @property def updated(self) -> int: return",
"def last(self, value: float): self._last = Decimal(value) @property def buy(self)",
"Decimal: return self._high @high.setter def high(self, value: float): self._high =",
"from decimal import Decimal class Ticker(object): def __init__( self, high:",
"Decimal: return self._avg @avg.setter def avg(self, value: float): self._avg =",
"self._high @high.setter def high(self, value: float): self._high = Decimal(value) @property",
"buy self.sell = sell self.updated = updated @property def high(self)",
"-> Decimal: return self._last @last.setter def last(self, value: float): self._last",
"value: float): self._vol_cur = Decimal(value) @property def last(self) -> Decimal:",
"class Ticker(object): def __init__( self, high: float, low: float, avg:",
"def updated(self) -> int: return self._updated @updated.setter def updated(self, value:",
"sell(self) -> Decimal: return self._sell @sell.setter def sell(self, value: float):",
"sell: float, updated: int, ): self.high = high self.low =",
"float, updated: int, ): self.high = high self.low = low",
"value: float): self._avg = Decimal(value) @property def vol(self) -> Decimal:",
"): self.high = high self.low = low self.avg = avg",
"self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur = Decimal(value) @property",
"Decimal(value) @property def last(self) -> Decimal: return self._last @last.setter def",
"= Decimal(value) @property def vol(self) -> Decimal: return self._vol @vol.setter",
"-> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur",
"vol_cur: int, last: float, buy: float, sell: float, updated: int,",
"= Decimal(value) @property def low(self) -> Decimal: return self._low @low.setter",
"float): self._buy = Decimal(value) @property def sell(self) -> Decimal: return",
"= updated @property def high(self) -> Decimal: return self._high @high.setter",
"self, high: float, low: float, avg: float, vol: float, vol_cur:",
"self.high = high self.low = low self.avg = avg self.vol",
"@property def sell(self) -> Decimal: return self._sell @sell.setter def sell(self,",
"= buy self.sell = sell self.updated = updated @property def",
"value: float): self._last = Decimal(value) @property def buy(self) -> Decimal:",
"self.vol = vol self.vol_cur = vol_cur self.last = last self.buy",
"sell self.updated = updated @property def high(self) -> Decimal: return",
"def vol_cur(self, value: float): self._vol_cur = Decimal(value) @property def last(self)",
"float, avg: float, vol: float, vol_cur: int, last: float, buy:",
"high self.low = low self.avg = avg self.vol = vol",
"Decimal(value) @property def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def",
"@property def updated(self) -> int: return self._updated @updated.setter def updated(self,",
"float, buy: float, sell: float, updated: int, ): self.high =",
"-> Decimal: return self._high @high.setter def high(self, value: float): self._high",
"float): self._last = Decimal(value) @property def buy(self) -> Decimal: return",
"def avg(self) -> Decimal: return self._avg @avg.setter def avg(self, value:",
"__init__( self, high: float, low: float, avg: float, vol: float,",
"low(self, value: float): self._low = Decimal(value) @property def avg(self) ->",
"vol self.vol_cur = vol_cur self.last = last self.buy = buy",
"Decimal: return self._vol @vol.setter def vol(self, value: float): self._vol =",
"-> Decimal: return self._low @low.setter def low(self, value: float): self._low",
"self._last = Decimal(value) @property def buy(self) -> Decimal: return self._buy",
"def __init__( self, high: float, low: float, avg: float, vol:",
"self._last @last.setter def last(self, value: float): self._last = Decimal(value) @property",
"vol: float, vol_cur: int, last: float, buy: float, sell: float,",
"Decimal(value) @property def vol(self) -> Decimal: return self._vol @vol.setter def",
"int, ): self.high = high self.low = low self.avg =",
"float): self._avg = Decimal(value) @property def vol(self) -> Decimal: return",
"def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value:",
"buy(self) -> Decimal: return self._buy @buy.setter def buy(self, value: float):",
"Decimal: return self._last @last.setter def last(self, value: float): self._last =",
"= Decimal(value) @property def avg(self) -> Decimal: return self._avg @avg.setter",
"= vol_cur self.last = last self.buy = buy self.sell =",
"vol_cur self.last = last self.buy = buy self.sell = sell",
"updated @property def high(self) -> Decimal: return self._high @high.setter def",
"def buy(self) -> Decimal: return self._buy @buy.setter def buy(self, value:",
"float): self._vol_cur = Decimal(value) @property def last(self) -> Decimal: return",
"@property def vol(self) -> Decimal: return self._vol @vol.setter def vol(self,",
"self.avg = avg self.vol = vol self.vol_cur = vol_cur self.last",
"def last(self) -> Decimal: return self._last @last.setter def last(self, value:",
"-> int: return self._updated @updated.setter def updated(self, value: int): self._updated",
"value: float): self._low = Decimal(value) @property def avg(self) -> Decimal:",
"self.sell = sell self.updated = updated @property def high(self) ->",
"self.buy = buy self.sell = sell self.updated = updated @property",
"@avg.setter def avg(self, value: float): self._avg = Decimal(value) @property def",
"int, last: float, buy: float, sell: float, updated: int, ):",
"buy(self, value: float): self._buy = Decimal(value) @property def sell(self) ->",
"low(self) -> Decimal: return self._low @low.setter def low(self, value: float):",
"Decimal class Ticker(object): def __init__( self, high: float, low: float,",
"float): self._low = Decimal(value) @property def avg(self) -> Decimal: return",
"def buy(self, value: float): self._buy = Decimal(value) @property def sell(self)",
"self._low @low.setter def low(self, value: float): self._low = Decimal(value) @property",
"self._buy = Decimal(value) @property def sell(self) -> Decimal: return self._sell",
"-> Decimal: return self._sell @sell.setter def sell(self, value: float): self._sell",
"def low(self) -> Decimal: return self._low @low.setter def low(self, value:",
"self._sell = Decimal(value) @property def updated(self) -> int: return self._updated",
"last(self, value: float): self._last = Decimal(value) @property def buy(self) ->",
"low: float, avg: float, vol: float, vol_cur: int, last: float,",
"-> Decimal: return self._avg @avg.setter def avg(self, value: float): self._avg",
"buy: float, sell: float, updated: int, ): self.high = high",
"value: float): self._high = Decimal(value) @property def low(self) -> Decimal:",
"self.updated = updated @property def high(self) -> Decimal: return self._high",
"return self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur = Decimal(value)",
"self._vol = Decimal(value) @property def vol_cur(self) -> Decimal: return self._vol_cur",
"return self._updated @updated.setter def updated(self, value: int): self._updated = int(value)",
"float, vol: float, vol_cur: int, last: float, buy: float, sell:",
"self._low = Decimal(value) @property def avg(self) -> Decimal: return self._avg",
"avg self.vol = vol self.vol_cur = vol_cur self.last = last",
"avg: float, vol: float, vol_cur: int, last: float, buy: float,",
"@low.setter def low(self, value: float): self._low = Decimal(value) @property def",
"vol(self, value: float): self._vol = Decimal(value) @property def vol_cur(self) ->",
"value: float): self._vol = Decimal(value) @property def vol_cur(self) -> Decimal:",
"@property def high(self) -> Decimal: return self._high @high.setter def high(self,",
"decimal import Decimal class Ticker(object): def __init__( self, high: float,",
"def sell(self, value: float): self._sell = Decimal(value) @property def updated(self)"
] |
[
"z in c3: crt = chinese_remainder_theorem([(x, p1), (y, p2), (z,",
"p1 = 32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n",
"c2 = [19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for",
"= p1*p2*p3 e = 3 c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16)",
"p1*p2*p3 e = 3 c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) #",
"c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) # from User's Guide to",
"# from User's Guide to PARI/GP, nth_root function sqrtnall =",
"(z, p3)]) d = hex(crt, 2)[2:].decode(\"hex\") if \"0ctf\" in d:",
"[19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for x in",
"r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p1)]))",
"\"\"\" c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3",
"c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p2)])) c3",
"c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3 =",
"PARI/GP, nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 =",
"(c, p3)])) \"\"\" c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 =",
"y in c2: for z in c3: crt = chinese_remainder_theorem([(x,",
"(c, p2)])) c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c,",
"nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}'",
"3)))\" % (c, p1)])) c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\"",
"= r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c,",
"c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p3)])) \"\"\"",
"p2), (z, p3)]) d = hex(crt, 2)[2:].decode(\"hex\") if \"0ctf\" in",
"p2)])) c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p3)]))",
"\"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p2)])) c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d,",
"import * p1 = 32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3 =",
"Guide to PARI/GP, nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in",
"13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946]",
"3)))\" % (c, p2)])) c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\"",
"\"\"\" for x in c1: for y in c2: for",
"26440615366395242196516853423447 n = p1*p2*p3 e = 3 c = int(open(\"flag.enc\",",
"= eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p2)])) c3 =",
"= [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for x in c1: for",
"16) # from User's Guide to PARI/GP, nth_root function sqrtnall",
"%d), 3)))\" % (c, p2)])) c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d),",
"\"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p1)])) c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d,",
"in c3: crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)])",
"c3: crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)]) d",
"= chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)]) d = hex(crt,",
"sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" %",
"c1: for y in c2: for z in c3: crt",
"5686385026105901867473638678946] \"\"\" for x in c1: for y in c2:",
"<filename>hard-gists/98bb452dc14e8c40e403/snippet.py from scryptos import * p1 = 32581479300404876772405716877547 p2 =",
"c2: for z in c3: crt = chinese_remainder_theorem([(x, p1), (y,",
"\"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p3)])) \"\"\" c1 = [6149264605288583791069539134541,",
"= 26440615366395242196516853423447 n = p1*p2*p3 e = 3 c =",
"c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p1)])) c2",
"= 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall,",
"chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)]) d = hex(crt, 2)[2:].decode(\"hex\")",
"= 32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n =",
"= 3 c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) # from User's",
"(c, p1)])) c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c,",
"in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\"",
"function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1",
"n = p1*p2*p3 e = 3 c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"),",
"p3)])) \"\"\" c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812]",
"for x in c1: for y in c2: for z",
"= [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3 = [13374868592866626517389128266735,",
"% (c, p3)])) \"\"\" c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2",
"c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for x in c1:",
"= int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) # from User's Guide to PARI/GP,",
"in c1: for y in c2: for z in c3:",
"[13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for x in c1: for y",
"e = 3 c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) # from",
"[6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766,",
"p1), (y, p2), (z, p3)]) d = hex(crt, 2)[2:].decode(\"hex\") if",
"= [19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for x",
"d = hex(crt, 2)[2:].decode(\"hex\") if \"0ctf\" in d: print d[d.find(\"0ctf\"):].strip()",
"% (c, p2)])) c3 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" %",
"= 27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n = p1*p2*p3 e =",
"for z in c3: crt = chinese_remainder_theorem([(x, p1), (y, p2),",
"13028011585706956936052628027629] c2 = [19616973567618515464515107624812] c3 = [13374868592866626517389128266735, 7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\"",
"int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) # from User's Guide to PARI/GP, nth_root",
"p3 = 26440615366395242196516853423447 n = p1*p2*p3 e = 3 c",
"from scryptos import * p1 = 32581479300404876772405716877547 p2 = 27038194053540661979045656526063",
"in c2: for z in c3: crt = chinese_remainder_theorem([(x, p1),",
"3)))\" % (c, p3)])) \"\"\" c1 = [6149264605288583791069539134541, 13404203109409336045283549715377, 13028011585706956936052628027629]",
"3 c = int(open(\"flag.enc\", \"rb\").read().encode(\"hex\"), 16) # from User's Guide",
"= eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p3)])) \"\"\" c1",
"to PARI/GP, nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2",
"User's Guide to PARI/GP, nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case",
"(y, p2), (z, p3)]) d = hex(crt, 2)[2:].decode(\"hex\") if \"0ctf\"",
"case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d),",
"7379361747422713811654086477766, 5686385026105901867473638678946] \"\"\" for x in c1: for y in",
"x in c1: for y in c2: for z in",
"%d), 3)))\" % (c, p1)])) c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d),",
"* p1 = 32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3 = 26440615366395242196516853423447",
"\"rb\").read().encode(\"hex\"), 16) # from User's Guide to PARI/GP, nth_root function",
"for y in c2: for z in c3: crt =",
"eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p2)])) c3 = eval(parigp([sqrtnall,",
"p2 = 27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n = p1*p2*p3 e",
"eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p1)])) c2 = eval(parigp([sqrtnall,",
"27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n = p1*p2*p3 e = 3",
"eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p3)])) \"\"\" c1 =",
"32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3 = 26440615366395242196516853423447 n = p1*p2*p3",
"'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d,",
"% (c, p1)])) c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" %",
"from User's Guide to PARI/GP, nth_root function sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible",
"sqrtnall = 'sqrtnall(x,n)={my(V,r,z,r2);r=sqrtn(x,n,&z);if(!z,error(\"Impossible case in sqrtn\"));if(type(x)==\"t_INTMOD\"||type(x)==\"t_PADIC\",r2 = r*z;n=1;while(r2!=r,r2*=z;n++));V=vector(n);V[1]=r;for(i=2,n,V[i]=V[i-1]*z);V}' c1 =",
"= eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p1)])) c2 =",
"p3)]) d = hex(crt, 2)[2:].decode(\"hex\") if \"0ctf\" in d: print",
"%d), 3)))\" % (c, p3)])) \"\"\" c1 = [6149264605288583791069539134541, 13404203109409336045283549715377,",
"crt = chinese_remainder_theorem([(x, p1), (y, p2), (z, p3)]) d =",
"p1)])) c2 = eval(parigp([sqrtnall, \"Vec(liftall(sqrtnall(Mod(%d, %d), 3)))\" % (c, p2)]))",
"scryptos import * p1 = 32581479300404876772405716877547 p2 = 27038194053540661979045656526063 p3"
] |
[
"max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')),",
"('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile',",
"# -*- coding: utf-8 -*- # Generated by Django 1.11",
"verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)),",
"2018-03-29 06:43 from __future__ import unicode_literals from django.conf import settings",
"('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture',",
"1.11 on 2018-03-29 06:43 from __future__ import unicode_literals from django.conf",
"__future__ import unicode_literals from django.conf import settings from django.db import",
"settings from django.db import migrations, models import django.db.models.deletion import musa.models",
"fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)),",
"fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music',",
"= True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [",
"primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about',",
"('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)), ('document',",
"by Django 1.11 on 2018-03-29 06:43 from __future__ import unicode_literals",
"<gh_stars>0 # -*- coding: utf-8 -*- # Generated by Django",
"] operations = [ migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,",
"-*- # Generated by Django 1.11 on 2018-03-29 06:43 from",
"from __future__ import unicode_literals from django.conf import settings from django.db",
"[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='MusicCollection', fields=[ ('id',",
"models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,",
"models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE,",
"utf-8 -*- # Generated by Django 1.11 on 2018-03-29 06:43",
"], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),",
"models import django.db.models.deletion import musa.models class Migration(migrations.Migration): initial = True",
"Generated by Django 1.11 on 2018-03-29 06:43 from __future__ import",
"-*- coding: utf-8 -*- # Generated by Django 1.11 on",
"('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user',",
"musa.models class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),",
"initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =",
"models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)),",
"('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True,",
"name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70,",
"import settings from django.db import migrations, models import django.db.models.deletion import",
"06:43 from __future__ import unicode_literals from django.conf import settings from",
"migrations, models import django.db.models.deletion import musa.models class Migration(migrations.Migration): initial =",
"models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)),",
"), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname',",
"dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='MusicCollection',",
"('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),",
"models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ),",
"('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],",
"import migrations, models import django.db.models.deletion import musa.models class Migration(migrations.Migration): initial",
"class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]",
"True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(",
"to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,",
"models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile', fields=[",
"Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations",
"max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]",
"models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg',",
"models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel(",
"django.db.models.deletion import musa.models class Migration(migrations.Migration): initial = True dependencies =",
"coding: utf-8 -*- # Generated by Django 1.11 on 2018-03-29",
"models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True,",
"# Generated by Django 1.11 on 2018-03-29 06:43 from __future__",
"('title', models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user',",
"import musa.models class Migration(migrations.Migration): initial = True dependencies = [",
"migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True,",
"('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True,",
"operations = [ migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False,",
"max_length=70)), ('about', models.TextField(blank=True, max_length=300)), ('picture', models.ImageField(default='/profile_images/avatar.jpeg', upload_to='profile_images')), ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),",
"Django 1.11 on 2018-03-29 06:43 from __future__ import unicode_literals from",
"from django.db import migrations, models import django.db.models.deletion import musa.models class",
"verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True, max_length=300)),",
"= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='MusicCollection', fields=[",
"= [ migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),",
"migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True,",
"from django.conf import settings from django.db import migrations, models import",
"name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)),",
"serialize=False, verbose_name='ID')), ('fullname', models.CharField(blank=True, max_length=70)), ('favourite_music', models.CharField(blank=True, max_length=70)), ('about', models.TextField(blank=True,",
"django.db import migrations, models import django.db.models.deletion import musa.models class Migration(migrations.Migration):",
"null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ],",
"primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at',",
"on 2018-03-29 06:43 from __future__ import unicode_literals from django.conf import",
"django.conf import settings from django.db import migrations, models import django.db.models.deletion",
"[ migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title',",
"migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='MusicCollection', fields=[ ('id', models.AutoField(auto_created=True,",
"import unicode_literals from django.conf import settings from django.db import migrations,",
"null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='UserProfile', fields=[ ('id',",
"import django.db.models.deletion import musa.models class Migration(migrations.Migration): initial = True dependencies",
"unicode_literals from django.conf import settings from django.db import migrations, models",
"serialize=False, verbose_name='ID')), ('title', models.CharField(blank=True, max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True,",
"max_length=70, null=True)), ('document', models.FileField(upload_to=musa.models.get_upload_path)), ('uploaded_at', models.DateTimeField(auto_now_add=True, null=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),"
] |
[
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"the CPython loops are mapped to these nodes. \"\"\" from",
"from .LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context): #",
"= context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context",
"emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True,",
"# # Licensed under the Apache License, Version 2.0 (the",
"with CPython, but also works on its own. # #",
"compliance with the License. # You may obtain a copy",
"2.0 (the \"License\"); # you may not use this file",
"agreed to in writing, software # distributed under the License",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"Unless required by applicable law or agreed to in writing,",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"them. In Nuitka, there are no for-loops or while-loops at",
"and statements there-in that break under certain conditions. See Developer",
"use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target,",
"it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit)",
"distributed under the License is distributed on an \"AS IS\"",
"an exception, it's unclear what line it would be anyway.",
"# Note: We are using the wrong line here, but",
"emit) def generateLoopContinueCode(statement, emit, context): # Functions used for generation",
"optimizing Python compiler that is compatible and # integrates with",
"emit, context): loop_start_label = context.allocateLabel(\"loop_start\") if not statement.isStatementAborting(): loop_end_label =",
"# limitations under the License. # \"\"\" Loop codes. Code",
"break under certain conditions. See Developer Manual for how the",
"loop_end_label = context.allocateLabel(\"loop_end\") else: loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break",
"the specific language governing permissions and # limitations under the",
"of \"Nuitka\", an optimizing Python compiler that is compatible and",
"# \"\"\" Loop codes. Code generation for loops, breaking them,",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"2021, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing",
"have been re-formulated in a simpler loop without a condition,",
"loop without a condition, and statements there-in that break under",
"context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is not None: getLabelCode(loop_end_label, emit)",
") context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is not None: getLabelCode(loop_end_label,",
"Part of \"Nuitka\", an optimizing Python compiler that is compatible",
"express or implied. # See the License for the specific",
"applicable law or agreed to in writing, software # distributed",
"except in compliance with the License. # You may obtain",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context): loop_start_label",
"under the License. # \"\"\" Loop codes. Code generation for",
"this point. They have been re-formulated in a simpler loop",
"statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref)",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"# Copyright 2021, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\",",
"does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target",
"Functions used for generation all accept statement, but this one",
"not use this file except in compliance with the License.",
"line here, but it's an exception, it's unclear what line",
"point. They have been re-formulated in a simpler loop without",
"emit, context): # Functions used for generation all accept statement,",
"statements there-in that break under certain conditions. See Developer Manual",
"limitations under the License. # \"\"\" Loop codes. Code generation",
"break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context): #",
"context): # Functions used for generation all accept statement, but",
"writing, software # distributed under the License is distributed on",
"in writing, software # distributed under the License is distributed",
"CPython loops are mapped to these nodes. \"\"\" from .CodeHelpers",
"false\", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is",
"but also works on its own. # # Licensed under",
"them, or continuing them. In Nuitka, there are no for-loops",
"you may not use this file except in compliance with",
"allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are",
"emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is not",
"been re-formulated in a simpler loop without a condition, and",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"unclear what line it would be anyway. old_source_ref = context.setCurrentSourceCodeReference(",
"for generation all accept statement, but this one does #",
"are mapped to these nodes. \"\"\" from .CodeHelpers import generateStatementSequenceCode",
"old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break)",
"a condition, and statements there-in that break under certain conditions.",
"compatible and # integrates with CPython, but also works on",
"this one does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit,",
"# # Part of \"Nuitka\", an optimizing Python compiler that",
"old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\", emit=emit,",
"integrates with CPython, but also works on its own. #",
"no for-loops or while-loops at this point. They have been",
"in a simpler loop without a condition, and statements there-in",
".ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import",
"use this file except in compliance with the License. #",
"without a condition, and statements there-in that break under certain",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"# not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target =",
"def generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel(\"loop_start\") if not statement.isStatementAborting():",
"it's unclear what line it would be anyway. old_source_ref =",
"context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label is not None:",
"to these nodes. \"\"\" from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes",
"or continuing them. In Nuitka, there are no for-loops or",
"own. # # Licensed under the Apache License, Version 2.0",
"disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement,",
"loops are mapped to these nodes. \"\"\" from .CodeHelpers import",
"context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context): # Functions used",
"generateLoopContinueCode(statement, emit, context): # Functions used for generation all accept",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"at this point. They have been re-formulated in a simpler",
"getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context): # Functions used for",
"# Functions used for generation all accept statement, but this",
"= context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context,",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"here, but it's an exception, it's unclear what line it",
"pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def",
"getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit)",
"or implied. # See the License for the specific language",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"License. # You may obtain a copy of the License",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"License, Version 2.0 (the \"License\"); # you may not use",
"condition, and statements there-in that break under certain conditions. See",
"its own. # # Licensed under the Apache License, Version",
"def generateLoopBreakCode(statement, emit, context): # Functions used for generation all",
"pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def",
"import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode,",
"# You may obtain a copy of the License at",
"while-loops at this point. They have been re-formulated in a",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel(\"loop_start\") if",
"Note: We are using the wrong line here, but it's",
"would be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING()",
"context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are using the",
"under the License is distributed on an \"AS IS\" BASIS,",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"what line it would be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference()",
"breaking them, or continuing them. In Nuitka, there are no",
"License for the specific language governing permissions and # limitations",
"loops, breaking them, or continuing them. In Nuitka, there are",
"Developer Manual for how the CPython loops are mapped to",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target",
"the License. # \"\"\" Loop codes. Code generation for loops,",
"generation all accept statement, but this one does # not",
"also works on its own. # # Licensed under the",
"context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) #",
"works on its own. # # Licensed under the Apache",
"it would be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode(",
"== false\", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if loop_end_label",
"getLabelCode def generateLoopBreakCode(statement, emit, context): # Functions used for generation",
"and # integrates with CPython, but also works on its",
"the wrong line here, but it's an exception, it's unclear",
"emit) def generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel(\"loop_start\") if not",
"getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context):",
"is compatible and # integrates with CPython, but also works",
"context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context):",
"the License for the specific language governing permissions and #",
"generateLoopBreakCode(statement, emit, context): # Functions used for generation all accept",
"context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context):",
"on its own. # # Licensed under the Apache License,",
"(the \"License\"); # you may not use this file except",
".LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context): # Functions",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"but this one does # not use it. pylint: disable=unused-argument",
"getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body,",
"# you may not use this file except in compliance",
"getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit,",
"either express or implied. # See the License for the",
"generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel(\"loop_start\") if not statement.isStatementAborting(): loop_end_label",
"In Nuitka, there are no for-loops or while-loops at this",
"disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement,",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"= context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context): # Functions",
"old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit,",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"the License is distributed on an \"AS IS\" BASIS, #",
"used for generation all accept statement, but this one does",
"import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit,",
"= context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue)",
"# integrates with CPython, but also works on its own.",
"in compliance with the License. # You may obtain a",
"there-in that break under certain conditions. See Developer Manual for",
"context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context )",
"software # distributed under the License is distributed on an",
"import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode",
"from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes",
"not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target = context.getLoopContinueTarget()",
"codes. Code generation for loops, breaking them, or continuing them.",
"= context.allocateLabel(\"loop_start\") if not statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\") else: loop_end_label",
"They have been re-formulated in a simpler loop without a",
"governing permissions and # limitations under the License. # \"\"\"",
"there are no for-loops or while-loops at this point. They",
"if not statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\") else: loop_end_label = None",
"statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\") else: loop_end_label = None getLabelCode(loop_start_label, emit)",
"# # Unless required by applicable law or agreed to",
"Nuitka, there are no for-loops or while-loops at this point.",
"generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from",
"are using the wrong line here, but it's an exception,",
"for loops, breaking them, or continuing them. In Nuitka, there",
"certain conditions. See Developer Manual for how the CPython loops",
"See Developer Manual for how the CPython loops are mapped",
"import getGotoCode, getLabelCode def generateLoopBreakCode(statement, emit, context): # Functions used",
"context.allocateLabel(\"loop_end\") else: loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label)",
"use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target,",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"that is compatible and # integrates with CPython, but also",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"<NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing Python",
"Version 2.0 (the \"License\"); # you may not use this",
"re-formulated in a simpler loop without a condition, and statements",
"law or agreed to in writing, software # distributed under",
"from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes",
"using the wrong line here, but it's an exception, it's",
"generation for loops, breaking them, or continuing them. In Nuitka,",
"context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are using the wrong line",
"context): loop_start_label = context.allocateLabel(\"loop_start\") if not statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\")",
"one does # not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context)",
"We are using the wrong line here, but it's an",
"getErrorExitBoolCode from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode",
"Python compiler that is compatible and # integrates with CPython,",
"continuing them. In Nuitka, there are no for-loops or while-loops",
"getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit, context): # Functions used for",
"implied. # See the License for the specific language governing",
"= context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context): loop_start_label =",
"an optimizing Python compiler that is compatible and # integrates",
"statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We",
"context.setLoopContinueTarget(old_loop_continue) # Note: We are using the wrong line here,",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"are no for-loops or while-loops at this point. They have",
"\"License\"); # you may not use this file except in",
"not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget()",
"and # limitations under the License. # \"\"\" Loop codes.",
"for-loops or while-loops at this point. They have been re-formulated",
"a simpler loop without a condition, and statements there-in that",
"that break under certain conditions. See Developer Manual for how",
"these nodes. \"\"\" from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes import",
"getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit) def generateLoopContinueCode(statement, emit,",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
") getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label,",
"wrong line here, but it's an exception, it's unclear what",
"mailto:<EMAIL> # # Part of \"Nuitka\", an optimizing Python compiler",
"Manual for how the CPython loops are mapped to these",
"context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, )",
"\"\"\" Loop codes. Code generation for loops, breaking them, or",
"loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue =",
"by applicable law or agreed to in writing, software #",
"# distributed under the License is distributed on an \"AS",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"from .ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode def",
"loop_start_label = context.allocateLabel(\"loop_start\") if not statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\") else:",
"may obtain a copy of the License at # #",
"# Unless required by applicable law or agreed to in",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"def generateLoopContinueCode(statement, emit, context): # Functions used for generation all",
"it's an exception, it's unclear what line it would be",
"nodes. \"\"\" from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode",
"be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING() ==",
"under certain conditions. See Developer Manual for how the CPython",
"conditions. See Developer Manual for how the CPython loops are",
"for how the CPython loops are mapped to these nodes.",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"\"\"\" from .CodeHelpers import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from",
".CodeHelpers import generateStatementSequenceCode from .ErrorCodes import getErrorExitBoolCode from .ExceptionCodes import",
"= context.allocateLabel(\"loop_end\") else: loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break =",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"context.allocateLabel(\"loop_start\") if not statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\") else: loop_end_label =",
"to in writing, software # distributed under the License is",
"anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() ) getErrorExitBoolCode( condition=\"CONSIDER_THREADING() == false\",",
"simpler loop without a condition, and statements there-in that break",
"all accept statement, but this one does # not use",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"Copyright 2021, <NAME>, mailto:<EMAIL> # # Part of \"Nuitka\", an",
"# See the License for the specific language governing permissions",
"Loop codes. Code generation for loops, breaking them, or continuing",
"emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are using",
") context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note: We are using the wrong",
"You may obtain a copy of the License at #",
"= None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label)",
"statement, but this one does # not use it. pylint:",
"language governing permissions and # limitations under the License. #",
"exception, it's unclear what line it would be anyway. old_source_ref",
"may not use this file except in compliance with the",
"or agreed to in writing, software # distributed under the",
"Code generation for loops, breaking them, or continuing them. In",
"how the CPython loops are mapped to these nodes. \"\"\"",
"required by applicable law or agreed to in writing, software",
".ExceptionCodes import getExceptionUnpublishedReleaseCode from .LabelCodes import getGotoCode, getLabelCode def generateLoopBreakCode(statement,",
"it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) break_target = context.getLoopBreakTarget() getGotoCode(break_target, emit)",
"accept statement, but this one does # not use it.",
"\"Nuitka\", an optimizing Python compiler that is compatible and #",
"not statement.isStatementAborting(): loop_end_label = context.allocateLabel(\"loop_end\") else: loop_end_label = None getLabelCode(loop_start_label,",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"generateStatementSequenceCode( statement_sequence=statement.subnode_loop_body, allow_none=True, emit=emit, context=context, ) context.setLoopBreakTarget(old_loop_break) context.setLoopContinueTarget(old_loop_continue) # Note:",
"License. # \"\"\" Loop codes. Code generation for loops, breaking",
"context.getLoopContinueTarget() getGotoCode(continue_target, emit) def generateLoopCode(statement, emit, context): loop_start_label = context.allocateLabel(\"loop_start\")",
"with the License. # You may obtain a copy of",
"this file except in compliance with the License. # You",
"None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue = context.setLoopContinueTarget(loop_start_label) generateStatementSequenceCode(",
"# Part of \"Nuitka\", an optimizing Python compiler that is",
"but it's an exception, it's unclear what line it would",
"line it would be anyway. old_source_ref = context.setCurrentSourceCodeReference( statement.getSourceReference() )",
"compiler that is compatible and # integrates with CPython, but",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"or while-loops at this point. They have been re-formulated in",
"CPython, but also works on its own. # # Licensed",
"else: loop_end_label = None getLabelCode(loop_start_label, emit) old_loop_break = context.setLoopBreakTarget(loop_end_label) old_loop_continue",
"permissions and # limitations under the License. # \"\"\" Loop",
"# not use it. pylint: disable=unused-argument getExceptionUnpublishedReleaseCode(emit, context) continue_target =",
"mapped to these nodes. \"\"\" from .CodeHelpers import generateStatementSequenceCode from",
"condition=\"CONSIDER_THREADING() == false\", emit=emit, context=context ) context.setCurrentSourceCodeReference(old_source_ref) getGotoCode(loop_start_label, emit) if"
] |
[
"'0' * (8 - len(_line)) + _line array_str += _line",
"с интервалом 2, начиная от 3, а после новое число",
"\"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0] == \"search\") & (line[1].isnumeric()):",
"input().split() if len(line) == 0: continue elif line[0] == \"print\":",
"хеш-функций необходимо знать первые k простых чисел. Посчитаем их один",
"= BloomFilter(elements_number, probability) if (bloom_filter.size == 0) | (bloom_filter.hash_numbers ==",
"return self.__prime_numbers.append(2) i = 3 while len(self.__prime_numbers) < prime_size: j",
"по счету простое число, а M - 31ое число Мерсенна,",
"- len(_line)) + _line array_str += _line return array_str[:self.__size] class",
"< self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key, i)): return False i +=",
"байтовый массив. Реализуем для удобства отдельную СД, из методов необходимо:",
"self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i += 1 def search(self, key: int):",
"- (i % 8)) def check_bit(self, i): if (self.__array[i //",
"основание), количество хеш-функций будет равно -log2P # хеш-функции используются вида:",
"M) mod m,где - x - ключ, i - номер",
"BloomFilter(elements_number, probability) if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):",
"= \"\" for byte in self.__array: _line = str(bin(byte))[2:] if",
"return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31)",
"(7 - (i % 8)))) == 0: return False else:",
"+ 1) self.__bitarray = BitArray(self.size) def __get_prime(self, prime_size): # обычный",
"continue bloom_filter = BloomFilter(elements_number, probability) if (bloom_filter.size == 0) |",
"\"\" for byte in self.__array: _line = str(bin(byte))[2:] if len(_line)",
"def print(self): return self.__bitarray.print() bloom_filter = 0 while True: try:",
"= float(line[2]) if (elements_number <= 0) | (probability <= 0)",
"число, а M - 31ое число Мерсенна, M = 2^31",
"|= 2 ** (7 - (i % 8)) def check_bit(self,",
"__get_hash(self, x, i): return (((i + 1) * x +",
"# хеш-функции используются вида: (((i + 1)*x + p(i+1)) mod",
"а после новое число проверять на # делимость на уже",
"0 while i < self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key, i)): return",
"будем хранить в структуре данных. # Также нам необходимо создать",
"continue break except TypeError: print(\"error\") continue else: print(\"error\") continue except",
"= 0 while i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i +=",
"bloom_filter = 0 while True: try: line = input().split() if",
"len(line) == 0: continue else: if line[0] == \"set\": try:",
"один раз в конструкторе BloomFilter # и будем хранить в",
"/ ln2 (2 - основание), количество хеш-функций будет равно -log2P",
"BitArray: def __init__(self, size): self.__array = bytearray(int(math.ceil(size / 8))) self.__size",
"массив отсутствует, # поэтому будем использовать байтовый массив. Реализуем для",
"| (probability >= 1): print(\"error\") continue bloom_filter = BloomFilter(elements_number, probability)",
"= input().split() if len(line) == 0: continue elif line[0] ==",
"используются вида: (((i + 1)*x + p(i+1)) mod M) mod",
"питоне битовый массив отсутствует, # поэтому будем использовать байтовый массив.",
"== 0: continue else: if line[0] == \"set\": try: elements_number",
"счету простое число, а M - 31ое число Мерсенна, M",
"новое число проверять на # делимость на уже найденные простые",
"483 647, M - простое число. # При подсчёте хеш-функций",
"содержится в i//8 байте на i % 8 месте self.__array[i",
"return False i += 1 return True def print(self): return",
"!= 8: _line = '0' * (8 - len(_line)) +",
"* x + self.__prime_numbers[i]) % Mersen_31) % self.size def add(self,",
"0 while i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i += 1",
"простое число. # При подсчёте хеш-функций необходимо знать первые k",
"- i-тое по счету простое число, а M - 31ое",
"except EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers) while True: try: line =",
"p: float): self.size = int(-round(n * math.log2(p) / math.log(2))) self.hash_numbers",
"структуре данных. # Также нам необходимо создать битовый массив размера",
"на уже найденные простые числа (кроме двойки, мы же рассматриваем",
"== \"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0] == \"search\") &",
"prime_size): # обычный проход по всем числам и их проверка",
"= bytearray(int(math.ceil(size / 8))) self.__size = size def add_bit(self, i):",
"exit() print(bloom_filter.size, bloom_filter.hash_numbers) while True: try: line = input().split() if",
"структуры m = -(nlog2P) / ln2 (2 - основание), количество",
"1): print(\"error\") continue bloom_filter = BloomFilter(elements_number, probability) if (bloom_filter.size ==",
"math.log(2))) self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers = list() self.__get_prime(self.hash_numbers + 1)",
"+ self.__prime_numbers[i]) % Mersen_31) % self.size def add(self, key: int):",
"и напечатать (вернуть) сам массив Mersen_31 = 2147483647 class BitArray:",
"j += 1 if prime_flag: self.__prime_numbers.append(i) i += 2 def",
"и их проверка на простоту - сложно по времени #",
"8: _line = '0' * (8 - len(_line)) + _line",
"+= 1 if prime_flag: self.__prime_numbers.append(i) i += 2 def __get_hash(self,",
"if (elements_number <= 0) | (probability <= 0) | (probability",
"i = 3 while len(self.__prime_numbers) < prime_size: j = 1",
"1 def search(self, key: int): i = 0 while i",
"(i % 8)))) == 0: return False else: return True",
"их проверка на простоту - сложно по времени # немного",
"n: int, p: float): self.size = int(-round(n * math.log2(p) /",
"len(line) == 0: continue elif line[0] == \"print\": print(bloom_filter.print()) elif",
"времени # немного упростим: во-первых будем идти с интервалом 2,",
"= str(bin(byte))[2:] if len(_line) != 8: _line = '0' *",
"list() self.__get_prime(self.hash_numbers + 1) self.__bitarray = BitArray(self.size) def __get_prime(self, prime_size):",
"= -(nlog2P) / ln2 (2 - основание), количество хеш-функций будет",
"будем идти с интервалом 2, начиная от 3, а после",
"рассматриваем нечётные) if prime_size == 1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i",
"0: continue else: if line[0] == \"set\": try: elements_number =",
"/ math.log(2))) self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers = list() self.__get_prime(self.hash_numbers +",
"m,где - x - ключ, i - номер хэш-функции, #",
"if prime_size == 1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i = 3",
"0) | (probability >= 1): print(\"error\") continue bloom_filter = BloomFilter(elements_number,",
"- 1, M = 2 147 483 647, M -",
"print(bloom_filter.print()) elif (line[0] == \"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0]",
"8)))) == 0: return False else: return True def print(self):",
"(probability <= 0) | (probability >= 1): print(\"error\") continue bloom_filter",
"& (2 ** (7 - (i % 8)))) == 0:",
"(line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0] == \"search\") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1])))) else:",
"хеш-функции используются вида: (((i + 1)*x + p(i+1)) mod M)",
"(line[0] == \"search\") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1])))) else: print(\"error\") except EOFError:",
"BloomFilter: def __init__(self, n: int, p: float): self.size = int(-round(n",
"для удобства отдельную СД, из методов необходимо: изменить # указанный",
"if len(line) == 0: continue elif line[0] == \"print\": print(bloom_filter.print())",
"float(line[2]) if (elements_number <= 0) | (probability <= 0) |",
"8)) def check_bit(self, i): if (self.__array[i // 8] & (2",
"if (self.__array[i // 8] & (2 ** (7 - (i",
"% 8 месте self.__array[i // 8] |= 2 ** (7",
"чисел. Посчитаем их один раз в конструкторе BloomFilter # и",
"первые k простых чисел. Посчитаем их один раз в конструкторе",
"j = 1 prime_flag = True while j < len(self.__prime_numbers):",
"+ 1)*x + p(i+1)) mod M) mod m,где - x",
"по времени # немного упростим: во-первых будем идти с интервалом",
"проверять на # делимость на уже найденные простые числа (кроме",
"массив размера m, однако по умолчанию в питоне битовый массив",
"# поэтому будем использовать байтовый массив. Реализуем для удобства отдельную",
"месте self.__array[i // 8] |= 2 ** (7 - (i",
"= size def add_bit(self, i): # i-тый бит содержится в",
"add_bit(self, i): # i-тый бит содержится в i//8 байте на",
"- приблизительное число элементов в массиве, P - вероятность ложноположительного",
"if prime_flag: self.__prime_numbers.append(i) i += 2 def __get_hash(self, x, i):",
"else: print(\"error\") continue except EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers) while True:",
"byte in self.__array: _line = str(bin(byte))[2:] if len(_line) != 8:",
"\"set\": try: elements_number = int(line[1]) probability = float(line[2]) if (elements_number",
"число проверять на # делимость на уже найденные простые числа",
"найденные простые числа (кроме двойки, мы же рассматриваем нечётные) if",
"line[0] == \"set\": try: elements_number = int(line[1]) probability = float(line[2])",
"+= 2 def __get_hash(self, x, i): return (((i + 1)",
"= True while j < len(self.__prime_numbers): if (i % self.__prime_numbers[j])",
"= input().split() if len(line) == 0: continue else: if line[0]",
"упростим: во-первых будем идти с интервалом 2, начиная от 3,",
"ложноположительного ответа, тогда размер # структуры m = -(nlog2P) /",
"- (i % 8)))) == 0: return False else: return",
"число. # При подсчёте хеш-функций необходимо знать первые k простых",
"False i += 1 return True def print(self): return self.__bitarray.print()",
"import exit # итак, n - приблизительное число элементов в",
"i += 1 def search(self, key: int): i = 0",
"i % 8 месте self.__array[i // 8] |= 2 **",
"0 while True: try: line = input().split() if len(line) ==",
"def add_bit(self, i): # i-тый бит содержится в i//8 байте",
"self.__prime_numbers = list() self.__get_prime(self.hash_numbers + 1) self.__bitarray = BitArray(self.size) def",
"self.size def add(self, key: int): i = 0 while i",
"= 0 while i < self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key, i)):",
"k простых чисел. Посчитаем их один раз в конструкторе BloomFilter",
"+ _line array_str += _line return array_str[:self.__size] class BloomFilter: def",
"# i-тый бит содержится в i//8 байте на i %",
"147 483 647, M - простое число. # При подсчёте",
"EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers) while True: try: line = input().split()",
"Посчитаем их один раз в конструкторе BloomFilter # и будем",
"print(\"error\") continue bloom_filter = BloomFilter(elements_number, probability) if (bloom_filter.size == 0)",
"использовать байтовый массив. Реализуем для удобства отдельную СД, из методов",
"необходимо: изменить # указанный бит на 1, проверить является ли",
"self.__bitarray = BitArray(self.size) def __get_prime(self, prime_size): # обычный проход по",
"self.__array[i // 8] |= 2 ** (7 - (i %",
"8 месте self.__array[i // 8] |= 2 ** (7 -",
"# обычный проход по всем числам и их проверка на",
"в i//8 байте на i % 8 месте self.__array[i //",
"self.__array: _line = str(bin(byte))[2:] if len(_line) != 8: _line =",
"методов необходимо: изменить # указанный бит на 1, проверить является",
"P - вероятность ложноположительного ответа, тогда размер # структуры m",
"= 0 while True: try: line = input().split() if len(line)",
"идти с интервалом 2, начиная от 3, а после новое",
"prime_flag: self.__prime_numbers.append(i) i += 2 def __get_hash(self, x, i): return",
"массив Mersen_31 = 2147483647 class BitArray: def __init__(self, size): self.__array",
"= int(line[1]) probability = float(line[2]) if (elements_number <= 0) |",
"# итак, n - приблизительное число элементов в массиве, P",
"Реализуем для удобства отдельную СД, из методов необходимо: изменить #",
"probability = float(line[2]) if (elements_number <= 0) | (probability <=",
"При подсчёте хеш-функций необходимо знать первые k простых чисел. Посчитаем",
"== 0) | (bloom_filter.hash_numbers == 0): print(\"error\") continue break except",
"int): i = 0 while i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i))",
"и будем хранить в структуре данных. # Также нам необходимо",
"def check_bit(self, i): if (self.__array[i // 8] & (2 **",
"while True: try: line = input().split() if len(line) == 0:",
"n - приблизительное число элементов в массиве, P - вероятность",
"обычный проход по всем числам и их проверка на простоту",
"if len(_line) != 8: _line = '0' * (8 -",
"key: int): i = 0 while i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key,",
"// 8] & (2 ** (7 - (i % 8))))",
"на # делимость на уже найденные простые числа (кроме двойки,",
"массиве, P - вероятность ложноположительного ответа, тогда размер # структуры",
"равно -log2P # хеш-функции используются вида: (((i + 1)*x +",
"2147483647 class BitArray: def __init__(self, size): self.__array = bytearray(int(math.ceil(size /",
"% 8)) def check_bit(self, i): if (self.__array[i // 8] &",
"массив. Реализуем для удобства отдельную СД, из методов необходимо: изменить",
"(i % 8)) def check_bit(self, i): if (self.__array[i // 8]",
"def __get_prime(self, prime_size): # обычный проход по всем числам и",
"количество хеш-функций будет равно -log2P # хеш-функции используются вида: (((i",
"== 0: prime_flag = False break j += 1 if",
"ln2 (2 - основание), количество хеш-функций будет равно -log2P #",
"после новое число проверять на # делимость на уже найденные",
"i = 0 while i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i",
"self.__bitarray.print() bloom_filter = 0 while True: try: line = input().split()",
"8] |= 2 ** (7 - (i % 8)) def",
"return False else: return True def print(self): array_str = \"\"",
"x, i): return (((i + 1) * x + self.__prime_numbers[i])",
"размера m, однако по умолчанию в питоне битовый массив отсутствует,",
"итак, n - приблизительное число элементов в массиве, P -",
"1 return True def print(self): return self.__bitarray.print() bloom_filter = 0",
"array_str += _line return array_str[:self.__size] class BloomFilter: def __init__(self, n:",
"0) | (bloom_filter.hash_numbers == 0): print(\"error\") continue break except TypeError:",
"def add(self, key: int): i = 0 while i <",
"# pi - i-тое по счету простое число, а M",
"простое число, а M - 31ое число Мерсенна, M =",
"_line return array_str[:self.__size] class BloomFilter: def __init__(self, n: int, p:",
"i): return (((i + 1) * x + self.__prime_numbers[i]) %",
"на i % 8 месте self.__array[i // 8] |= 2",
"изменить # указанный бит на 1, проверить является ли указанный",
"prime_flag = False break j += 1 if prime_flag: self.__prime_numbers.append(i)",
"хеш-функций будет равно -log2P # хеш-функции используются вида: (((i +",
"нам необходимо создать битовый массив размера m, однако по умолчанию",
"1 и напечатать (вернуть) сам массив Mersen_31 = 2147483647 class",
"i)) i += 1 def search(self, key: int): i =",
"вида: (((i + 1)*x + p(i+1)) mod M) mod m,где",
"начиная от 3, а после новое число проверять на #",
"print(self): return self.__bitarray.print() bloom_filter = 0 while True: try: line",
"i)): return False i += 1 return True def print(self):",
"class BitArray: def __init__(self, size): self.__array = bytearray(int(math.ceil(size / 8)))",
"self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i = 3 while len(self.__prime_numbers) < prime_size:",
"битовый массив размера m, однако по умолчанию в питоне битовый",
"- ключ, i - номер хэш-функции, # pi - i-тое",
"1) self.__bitarray = BitArray(self.size) def __get_prime(self, prime_size): # обычный проход",
"ли указанный бит 1 и напечатать (вернуть) сам массив Mersen_31",
"+= 1 return True def print(self): return self.__bitarray.print() bloom_filter =",
"= 2^31 - 1, M = 2 147 483 647,",
"M = 2 147 483 647, M - простое число.",
"M = 2^31 - 1, M = 2 147 483",
"необходимо создать битовый массив размера m, однако по умолчанию в",
"size def add_bit(self, i): # i-тый бит содержится в i//8",
"not self.__bitarray.check_bit(self.__get_hash(key, i)): return False i += 1 return True",
"try: elements_number = int(line[1]) probability = float(line[2]) if (elements_number <=",
"конструкторе BloomFilter # и будем хранить в структуре данных. #",
"по умолчанию в питоне битовый массив отсутствует, # поэтому будем",
"True def print(self): return self.__bitarray.print() bloom_filter = 0 while True:",
"будет равно -log2P # хеш-функции используются вида: (((i + 1)*x",
"= int(-round(n * math.log2(p) / math.log(2))) self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers",
"break except TypeError: print(\"error\") continue else: print(\"error\") continue except EOFError:",
"1, проверить является ли указанный бит 1 и напечатать (вернуть)",
"TypeError: print(\"error\") continue else: print(\"error\") continue except EOFError: exit() print(bloom_filter.size,",
"bloom_filter.hash_numbers) while True: try: line = input().split() if len(line) ==",
"float): self.size = int(-round(n * math.log2(p) / math.log(2))) self.hash_numbers =",
"(i % self.__prime_numbers[j]) == 0: prime_flag = False break j",
"continue else: print(\"error\") continue except EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers) while",
"отсутствует, # поэтому будем использовать байтовый массив. Реализуем для удобства",
"def __init__(self, n: int, p: float): self.size = int(-round(n *",
"# делимость на уже найденные простые числа (кроме двойки, мы",
"self.__bitarray.add_bit(self.__get_hash(key, i)) i += 1 def search(self, key: int): i",
"тогда размер # структуры m = -(nlog2P) / ln2 (2",
"BitArray(self.size) def __get_prime(self, prime_size): # обычный проход по всем числам",
"len(_line)) + _line array_str += _line return array_str[:self.__size] class BloomFilter:",
"немного упростим: во-первых будем идти с интервалом 2, начиная от",
"if line[0] == \"set\": try: elements_number = int(line[1]) probability =",
"* (8 - len(_line)) + _line array_str += _line return",
"отдельную СД, из методов необходимо: изменить # указанный бит на",
"2 def __get_hash(self, x, i): return (((i + 1) *",
"BloomFilter # и будем хранить в структуре данных. # Также",
"+= _line return array_str[:self.__size] class BloomFilter: def __init__(self, n: int,",
"prime_size == 1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i = 3 while",
"i - номер хэш-функции, # pi - i-тое по счету",
"== 0: return False else: return True def print(self): array_str",
"% 8)))) == 0: return False else: return True def",
"__init__(self, size): self.__array = bytearray(int(math.ceil(size / 8))) self.__size = size",
"in self.__array: _line = str(bin(byte))[2:] if len(_line) != 8: _line",
"< prime_size: j = 1 prime_flag = True while j",
"= '0' * (8 - len(_line)) + _line array_str +=",
"if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0): print(\"error\") continue",
"необходимо знать первые k простых чисел. Посчитаем их один раз",
"(7 - (i % 8)) def check_bit(self, i): if (self.__array[i",
"line[0] == \"print\": print(bloom_filter.print()) elif (line[0] == \"add\") & (line[1].isnumeric()):",
"2 ** (7 - (i % 8)) def check_bit(self, i):",
"бит на 1, проверить является ли указанный бит 1 и",
"input().split() if len(line) == 0: continue else: if line[0] ==",
"False break j += 1 if prime_flag: self.__prime_numbers.append(i) i +=",
"мы же рассматриваем нечётные) if prime_size == 1: self.__prime_numbers.append(2) return",
"return self.__bitarray.print() bloom_filter = 0 while True: try: line =",
"хэш-функции, # pi - i-тое по счету простое число, а",
"pi - i-тое по счету простое число, а M -",
"= BitArray(self.size) def __get_prime(self, prime_size): # обычный проход по всем",
"for byte in self.__array: _line = str(bin(byte))[2:] if len(_line) !=",
"(self.__array[i // 8] & (2 ** (7 - (i %",
"+ 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size",
"i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i += 1 def search(self,",
"True def print(self): array_str = \"\" for byte in self.__array:",
"(((i + 1)*x + p(i+1)) mod M) mod m,где -",
"из методов необходимо: изменить # указанный бит на 1, проверить",
"try: line = input().split() if len(line) == 0: continue else:",
"3, а после новое число проверять на # делимость на",
"print(self): array_str = \"\" for byte in self.__array: _line =",
"# структуры m = -(nlog2P) / ln2 (2 - основание),",
"+ p(i+1)) mod M) mod m,где - x - ключ,",
"continue except EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers) while True: try: line",
"if (i % self.__prime_numbers[j]) == 0: prime_flag = False break",
"probability) if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0): print(\"error\")",
"двойки, мы же рассматриваем нечётные) if prime_size == 1: self.__prime_numbers.append(2)",
"x - ключ, i - номер хэш-функции, # pi -",
"является ли указанный бит 1 и напечатать (вернуть) сам массив",
"continue else: if line[0] == \"set\": try: elements_number = int(line[1])",
"байте на i % 8 месте self.__array[i // 8] |=",
"elif (line[0] == \"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0] ==",
"напечатать (вернуть) сам массив Mersen_31 = 2147483647 class BitArray: def",
"return array_str[:self.__size] class BloomFilter: def __init__(self, n: int, p: float):",
"exit # итак, n - приблизительное число элементов в массиве,",
"print(bloom_filter.size, bloom_filter.hash_numbers) while True: try: line = input().split() if len(line)",
"в конструкторе BloomFilter # и будем хранить в структуре данных.",
"# указанный бит на 1, проверить является ли указанный бит",
"elif line[0] == \"print\": print(bloom_filter.print()) elif (line[0] == \"add\") &",
"ответа, тогда размер # структуры m = -(nlog2P) / ln2",
"m, однако по умолчанию в питоне битовый массив отсутствует, #",
"elements_number = int(line[1]) probability = float(line[2]) if (elements_number <= 0)",
"битовый массив отсутствует, # поэтому будем использовать байтовый массив. Реализуем",
"= 2147483647 class BitArray: def __init__(self, size): self.__array = bytearray(int(math.ceil(size",
"- 31ое число Мерсенна, M = 2^31 - 1, M",
"# и будем хранить в структуре данных. # Также нам",
"== \"set\": try: elements_number = int(line[1]) probability = float(line[2]) if",
"array_str = \"\" for byte in self.__array: _line = str(bin(byte))[2:]",
"print(\"error\") continue except EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers) while True: try:",
"= 1 prime_flag = True while j < len(self.__prime_numbers): if",
"p(i+1)) mod M) mod m,где - x - ключ, i",
"нечётные) if prime_size == 1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i =",
"i = 0 while i < self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key,",
"= False break j += 1 if prime_flag: self.__prime_numbers.append(i) i",
"Также нам необходимо создать битовый массив размера m, однако по",
"continue elif line[0] == \"print\": print(bloom_filter.print()) elif (line[0] == \"add\")",
"- номер хэш-функции, # pi - i-тое по счету простое",
"# При подсчёте хеш-функций необходимо знать первые k простых чисел.",
"(кроме двойки, мы же рассматриваем нечётные) if prime_size == 1:",
"size): self.__array = bytearray(int(math.ceil(size / 8))) self.__size = size def",
"1)*x + p(i+1)) mod M) mod m,где - x -",
"M - простое число. # При подсчёте хеш-функций необходимо знать",
"< self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i += 1 def search(self, key:",
"31ое число Мерсенна, M = 2^31 - 1, M =",
"i-тый бит содержится в i//8 байте на i % 8",
"1 prime_flag = True while j < len(self.__prime_numbers): if (i",
"указанный бит на 1, проверить является ли указанный бит 1",
"/ 8))) self.__size = size def add_bit(self, i): # i-тый",
"math.log2(p) / math.log(2))) self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers = list() self.__get_prime(self.hash_numbers",
"return True def print(self): array_str = \"\" for byte in",
"__init__(self, n: int, p: float): self.size = int(-round(n * math.log2(p)",
"от 3, а после новое число проверять на # делимость",
"len(self.__prime_numbers) < prime_size: j = 1 prime_flag = True while",
"а M - 31ое число Мерсенна, M = 2^31 -",
"== 0): print(\"error\") continue break except TypeError: print(\"error\") continue else:",
"Mersen_31 = 2147483647 class BitArray: def __init__(self, size): self.__array =",
"(2 ** (7 - (i % 8)))) == 0: return",
"_line = str(bin(byte))[2:] if len(_line) != 8: _line = '0'",
"line = input().split() if len(line) == 0: continue else: if",
"СД, из методов необходимо: изменить # указанный бит на 1,",
"if len(line) == 0: continue else: if line[0] == \"set\":",
"(line[0] == \"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0] == \"search\")",
"add(self, key: int): i = 0 while i < self.hash_numbers:",
"self.__prime_numbers.append(i) i += 2 def __get_hash(self, x, i): return (((i",
"% self.size def add(self, key: int): i = 0 while",
"= 2 147 483 647, M - простое число. #",
"M - 31ое число Мерсенна, M = 2^31 - 1,",
"1 if prime_flag: self.__prime_numbers.append(i) i += 2 def __get_hash(self, x,",
"- x - ключ, i - номер хэш-функции, # pi",
"< len(self.__prime_numbers): if (i % self.__prime_numbers[j]) == 0: prime_flag =",
"(elements_number <= 0) | (probability <= 0) | (probability >=",
"except TypeError: print(\"error\") continue else: print(\"error\") continue except EOFError: exit()",
"_line array_str += _line return array_str[:self.__size] class BloomFilter: def __init__(self,",
"def search(self, key: int): i = 0 while i <",
"def print(self): array_str = \"\" for byte in self.__array: _line",
"check_bit(self, i): if (self.__array[i // 8] & (2 ** (7",
"== 1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i = 3 while len(self.__prime_numbers)",
"int): i = 0 while i < self.hash_numbers: if not",
"проход по всем числам и их проверка на простоту -",
">= 1): print(\"error\") continue bloom_filter = BloomFilter(elements_number, probability) if (bloom_filter.size",
"bytearray(int(math.ceil(size / 8))) self.__size = size def add_bit(self, i): #",
"| (probability <= 0) | (probability >= 1): print(\"error\") continue",
"(2 - основание), количество хеш-функций будет равно -log2P # хеш-функции",
"размер # структуры m = -(nlog2P) / ln2 (2 -",
"def __get_hash(self, x, i): return (((i + 1) * x",
"номер хэш-функции, # pi - i-тое по счету простое число,",
"self.__prime_numbers.append(2) i = 3 while len(self.__prime_numbers) < prime_size: j =",
"ключ, i - номер хэш-функции, # pi - i-тое по",
"1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2) i = 3 while len(self.__prime_numbers) <",
"в питоне битовый массив отсутствует, # поэтому будем использовать байтовый",
"указанный бит 1 и напечатать (вернуть) сам массив Mersen_31 =",
"1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size def",
"знать первые k простых чисел. Посчитаем их один раз в",
"хранить в структуре данных. # Также нам необходимо создать битовый",
"= list() self.__get_prime(self.hash_numbers + 1) self.__bitarray = BitArray(self.size) def __get_prime(self,",
"на 1, проверить является ли указанный бит 1 и напечатать",
"while len(self.__prime_numbers) < prime_size: j = 1 prime_flag = True",
"поэтому будем использовать байтовый массив. Реализуем для удобства отдельную СД,",
"2, начиная от 3, а после новое число проверять на",
"раз в конструкторе BloomFilter # и будем хранить в структуре",
"# немного упростим: во-первых будем идти с интервалом 2, начиная",
"% self.__prime_numbers[j]) == 0: prime_flag = False break j +=",
"бит 1 и напечатать (вернуть) сам массив Mersen_31 = 2147483647",
"print(\"error\") continue break except TypeError: print(\"error\") continue else: print(\"error\") continue",
"на простоту - сложно по времени # немного упростим: во-первых",
"& (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif (line[0] == \"search\") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1]))))",
"True while j < len(self.__prime_numbers): if (i % self.__prime_numbers[j]) ==",
"<= 0) | (probability >= 1): print(\"error\") continue bloom_filter =",
"0): print(\"error\") continue break except TypeError: print(\"error\") continue else: print(\"error\")",
"числам и их проверка на простоту - сложно по времени",
"int(line[1]) probability = float(line[2]) if (elements_number <= 0) | (probability",
"sys import exit # итак, n - приблизительное число элементов",
"# Также нам необходимо создать битовый массив размера m, однако",
"же рассматриваем нечётные) if prime_size == 1: self.__prime_numbers.append(2) return self.__prime_numbers.append(2)",
"else: return True def print(self): array_str = \"\" for byte",
"m = -(nlog2P) / ln2 (2 - основание), количество хеш-функций",
"== \"print\": print(bloom_filter.print()) elif (line[0] == \"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1]))",
"сложно по времени # немного упростим: во-первых будем идти с",
"return True def print(self): return self.__bitarray.print() bloom_filter = 0 while",
"в массиве, P - вероятность ложноположительного ответа, тогда размер #",
"элементов в массиве, P - вероятность ложноположительного ответа, тогда размер",
"бит содержится в i//8 байте на i % 8 месте",
"prime_flag = True while j < len(self.__prime_numbers): if (i %",
"подсчёте хеш-функций необходимо знать первые k простых чисел. Посчитаем их",
"from sys import exit # итак, n - приблизительное число",
"создать битовый массив размера m, однако по умолчанию в питоне",
"mod M) mod m,где - x - ключ, i -",
"| (bloom_filter.hash_numbers == 0): print(\"error\") continue break except TypeError: print(\"error\")",
"self.__prime_numbers[i]) % Mersen_31) % self.size def add(self, key: int): i",
"== 0: continue elif line[0] == \"print\": print(bloom_filter.print()) elif (line[0]",
"self.size = int(-round(n * math.log2(p) / math.log(2))) self.hash_numbers = int(-round(math.log2(p)))",
"2^31 - 1, M = 2 147 483 647, M",
"8] & (2 ** (7 - (i % 8)))) ==",
"- простое число. # При подсчёте хеш-функций необходимо знать первые",
"self.__bitarray.check_bit(self.__get_hash(key, i)): return False i += 1 return True def",
"<= 0) | (probability <= 0) | (probability >= 1):",
"prime_size: j = 1 prime_flag = True while j <",
"if not self.__bitarray.check_bit(self.__get_hash(key, i)): return False i += 1 return",
"_line = '0' * (8 - len(_line)) + _line array_str",
"def __init__(self, size): self.__array = bytearray(int(math.ceil(size / 8))) self.__size =",
"простые числа (кроме двойки, мы же рассматриваем нечётные) if prime_size",
"x + self.__prime_numbers[i]) % Mersen_31) % self.size def add(self, key:",
"key: int): i = 0 while i < self.hash_numbers: if",
"mod m,где - x - ключ, i - номер хэш-функции,",
"в структуре данных. # Также нам необходимо создать битовый массив",
"проверка на простоту - сложно по времени # немного упростим:",
"False else: return True def print(self): array_str = \"\" for",
"try: line = input().split() if len(line) == 0: continue elif",
"делимость на уже найденные простые числа (кроме двойки, мы же",
"self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key, i)): return False i += 1",
"число элементов в массиве, P - вероятность ложноположительного ответа, тогда",
"i//8 байте на i % 8 месте self.__array[i // 8]",
"+= 1 def search(self, key: int): i = 0 while",
"2 147 483 647, M - простое число. # При",
"int(-round(math.log2(p))) self.__prime_numbers = list() self.__get_prime(self.hash_numbers + 1) self.__bitarray = BitArray(self.size)",
"1, M = 2 147 483 647, M - простое",
"len(self.__prime_numbers): if (i % self.__prime_numbers[j]) == 0: prime_flag = False",
"0: return False else: return True def print(self): array_str =",
"число Мерсенна, M = 2^31 - 1, M = 2",
"len(_line) != 8: _line = '0' * (8 - len(_line))",
"bloom_filter = BloomFilter(elements_number, probability) if (bloom_filter.size == 0) | (bloom_filter.hash_numbers",
"= 3 while len(self.__prime_numbers) < prime_size: j = 1 prime_flag",
"break j += 1 if prime_flag: self.__prime_numbers.append(i) i += 2",
"self.__array = bytearray(int(math.ceil(size / 8))) self.__size = size def add_bit(self,",
"// 8] |= 2 ** (7 - (i % 8))",
"однако по умолчанию в питоне битовый массив отсутствует, # поэтому",
"else: if line[0] == \"set\": try: elements_number = int(line[1]) probability",
"интервалом 2, начиная от 3, а после новое число проверять",
"int(-round(n * math.log2(p) / math.log(2))) self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers =",
"(bloom_filter.hash_numbers == 0): print(\"error\") continue break except TypeError: print(\"error\") continue",
"while i < self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key, i)): return False",
"i < self.hash_numbers: if not self.__bitarray.check_bit(self.__get_hash(key, i)): return False i",
"- вероятность ложноположительного ответа, тогда размер # структуры m =",
"0) | (probability <= 0) | (probability >= 1): print(\"error\")",
"self.__prime_numbers[j]) == 0: prime_flag = False break j += 1",
"while j < len(self.__prime_numbers): if (i % self.__prime_numbers[j]) == 0:",
"простоту - сложно по времени # немного упростим: во-первых будем",
"647, M - простое число. # При подсчёте хеш-функций необходимо",
"будем использовать байтовый массив. Реализуем для удобства отдельную СД, из",
"str(bin(byte))[2:] if len(_line) != 8: _line = '0' * (8",
"простых чисел. Посчитаем их один раз в конструкторе BloomFilter #",
"(bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0): print(\"error\") continue break",
"print(\"error\") continue else: print(\"error\") continue except EOFError: exit() print(bloom_filter.size, bloom_filter.hash_numbers)",
"self.__size = size def add_bit(self, i): # i-тый бит содержится",
"умолчанию в питоне битовый массив отсутствует, # поэтому будем использовать",
"** (7 - (i % 8)) def check_bit(self, i): if",
"проверить является ли указанный бит 1 и напечатать (вернуть) сам",
"3 while len(self.__prime_numbers) < prime_size: j = 1 prime_flag =",
"line = input().split() if len(line) == 0: continue elif line[0]",
"i): if (self.__array[i // 8] & (2 ** (7 -",
"array_str[:self.__size] class BloomFilter: def __init__(self, n: int, p: float): self.size",
"вероятность ложноположительного ответа, тогда размер # структуры m = -(nlog2P)",
"j < len(self.__prime_numbers): if (i % self.__prime_numbers[j]) == 0: prime_flag",
"данных. # Также нам необходимо создать битовый массив размера m,",
"во-первых будем идти с интервалом 2, начиная от 3, а",
"Mersen_31) % self.size def add(self, key: int): i = 0",
"i += 1 return True def print(self): return self.__bitarray.print() bloom_filter",
"* math.log2(p) / math.log(2))) self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers = list()",
"(8 - len(_line)) + _line array_str += _line return array_str[:self.__size]",
"self.hash_numbers = int(-round(math.log2(p))) self.__prime_numbers = list() self.__get_prime(self.hash_numbers + 1) self.__bitarray",
"int, p: float): self.size = int(-round(n * math.log2(p) / math.log(2)))",
"их один раз в конструкторе BloomFilter # и будем хранить",
"True: try: line = input().split() if len(line) == 0: continue",
"import math from sys import exit # итак, n -",
"(вернуть) сам массив Mersen_31 = 2147483647 class BitArray: def __init__(self,",
"0: prime_flag = False break j += 1 if prime_flag:",
"i-тое по счету простое число, а M - 31ое число",
"self.__get_prime(self.hash_numbers + 1) self.__bitarray = BitArray(self.size) def __get_prime(self, prime_size): #",
"Мерсенна, M = 2^31 - 1, M = 2 147",
"= int(-round(math.log2(p))) self.__prime_numbers = list() self.__get_prime(self.hash_numbers + 1) self.__bitarray =",
"уже найденные простые числа (кроме двойки, мы же рассматриваем нечётные)",
"class BloomFilter: def __init__(self, n: int, p: float): self.size =",
"по всем числам и их проверка на простоту - сложно",
"сам массив Mersen_31 = 2147483647 class BitArray: def __init__(self, size):",
"- основание), количество хеш-функций будет равно -log2P # хеш-функции используются",
"приблизительное число элементов в массиве, P - вероятность ложноположительного ответа,",
"i += 2 def __get_hash(self, x, i): return (((i +",
"i): # i-тый бит содержится в i//8 байте на i",
"(((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) %",
"8))) self.__size = size def add_bit(self, i): # i-тый бит",
"while i < self.hash_numbers: self.__bitarray.add_bit(self.__get_hash(key, i)) i += 1 def",
"** (7 - (i % 8)))) == 0: return False",
"bloom_filter.add(int(line[1])) elif (line[0] == \"search\") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1])))) else: print(\"error\")",
"__get_prime(self, prime_size): # обычный проход по всем числам и их",
"-log2P # хеш-функции используются вида: (((i + 1)*x + p(i+1))",
"search(self, key: int): i = 0 while i < self.hash_numbers:",
"числа (кроме двойки, мы же рассматриваем нечётные) if prime_size ==",
"% Mersen_31) % self.size def add(self, key: int): i =",
"удобства отдельную СД, из методов необходимо: изменить # указанный бит",
"-(nlog2P) / ln2 (2 - основание), количество хеш-функций будет равно",
"\"print\": print(bloom_filter.print()) elif (line[0] == \"add\") & (line[1].isnumeric()): bloom_filter.add(int(line[1])) elif",
"== \"search\") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1])))) else: print(\"error\") except EOFError: break",
"всем числам и их проверка на простоту - сложно по",
"0: continue elif line[0] == \"print\": print(bloom_filter.print()) elif (line[0] ==",
"math from sys import exit # итак, n - приблизительное",
"(probability >= 1): print(\"error\") continue bloom_filter = BloomFilter(elements_number, probability) if",
"elif (line[0] == \"search\") & (line[1].isnumeric()): print(int(bloom_filter.search(int(line[1])))) else: print(\"error\") except",
"- сложно по времени # немного упростим: во-первых будем идти"
] |
[
"zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock()",
"size: %.0f [B]\" % (message_size, ) print \"array count: %.0f\"",
"megabits = float (throughput * message_size * 8) / 1000000",
"!= 3: print 'usage: subscriber <connect_to> <array-count>' sys.exit (1) try:",
"s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print",
"integers' sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to)",
"3: print 'usage: subscriber <connect_to> <array-count>' sys.exit (1) try: connect_to",
"= float (throughput * message_size * 8) / 1000000 print",
"the file COPYING.BSD, distributed as part of this software. #-----------------------------------------------------------------------------",
"int (sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count must be",
"start) * 1000000 if elapsed == 0: elapsed = 1",
"1000000 if elapsed == 0: elapsed = 1 throughput =",
"test that subscribes to NumPy arrays. Uses REQ/REP (on PUB/SUB",
"the New BSD License. The full license is in #",
"socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)]",
"subscribes to NumPy arrays. Uses REQ/REP (on PUB/SUB socket +",
"float (elapsed) message_size = a.nbytes megabits = float (throughput *",
"in # the file COPYING.BSD, distributed as part of this",
"print \" Done.\" end = time.clock() elapsed = (end -",
"connect_to = sys.argv[1] array_count = int (sys.argv[2]) except (ValueError, OverflowError),",
"= ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance()",
"(sys.argv) != 3: print 'usage: subscriber <connect_to> <array-count>' sys.exit (1)",
"print \"array count: %.0f\" % (array_count, ) print \"mean throughput:",
"throughput: %.3f [Mb/s]\" % (megabits, ) time.sleep(1.0) if __name__ ==",
"/ float (elapsed) message_size = a.nbytes megabits = float (throughput",
"except (ValueError, OverflowError), e: print 'array-count must be integers' sys.exit",
"(array_count, ) print \"mean throughput: %.0f [msg/s]\" % (throughput, )",
"sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'')",
"s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print \"Receiving arrays...\" for",
"start = time.clock() print \"Receiving arrays...\" for i in range(array_count):",
"print 'usage: subscriber <connect_to> <array-count>' sys.exit (1) try: connect_to =",
"/ 1000000 print \"message size: %.0f [B]\" % (message_size, )",
"License. The full license is in # the file COPYING.BSD,",
"+ 1) to synchronize \"\"\" #----------------------------------------------------------------------------- # Copyright (c) 2010",
") ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv()",
"%.0f\" % (array_count, ) print \"mean throughput: %.0f [msg/s]\" %",
"1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx",
"%.3f [Mb/s]\" % (megabits, ) time.sleep(1.0) if __name__ == \"__main__\":",
"e: print 'array-count must be integers' sys.exit (1) ctx =",
"(elapsed) message_size = a.nbytes megabits = float (throughput * message_size",
"2010 <NAME> # # Distributed under the terms of the",
"'array-count must be integers' sys.exit (1) ctx = zmq.Context() s",
"throughput: %.0f [msg/s]\" % (throughput, ) print \"mean throughput: %.3f",
"print \"Receiving arrays...\" for i in range(array_count): a = s.recv_pyobj()",
"(c) 2010 <NAME> # # Distributed under the terms of",
"%.0f [msg/s]\" % (throughput, ) print \"mean throughput: %.3f [Mb/s]\"",
"to synchronize \"\"\" #----------------------------------------------------------------------------- # Copyright (c) 2010 <NAME> #",
"end = time.clock() elapsed = (end - start) * 1000000",
"import numpy def sync(connect_to): # use connect socket + 1",
"import zmq import numpy def sync(connect_to): # use connect socket",
"sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx =",
"BSD License. The full license is in # the file",
"0: elapsed = 1 throughput = (1000000.0 * float (array_count))",
"numpy def sync(connect_to): # use connect socket + 1 sync_with",
"sys.argv[1] array_count = int (sys.argv[2]) except (ValueError, OverflowError), e: print",
"\"array count: %.0f\" % (array_count, ) print \"mean throughput: %.0f",
"i in range(array_count): a = s.recv_pyobj() print \" Done.\" end",
"The full license is in # the file COPYING.BSD, distributed",
"= zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main():",
"(throughput * message_size * 8) / 1000000 print \"message size:",
"OverflowError), e: print 'array-count must be integers' sys.exit (1) ctx",
"= ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if len (sys.argv)",
"def sync(connect_to): # use connect socket + 1 sync_with =",
"# Copyright (c) 2010 <NAME> # # Distributed under the",
"s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print \"Receiving arrays...\" for i",
"terms of the New BSD License. The full license is",
"of this software. #----------------------------------------------------------------------------- import sys import time import zmq",
"COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import sys",
"# # Distributed under the terms of the New BSD",
"= zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start =",
"# Distributed under the terms of the New BSD License.",
"\"mean throughput: %.3f [Mb/s]\" % (megabits, ) time.sleep(1.0) if __name__",
"\"message size: %.0f [B]\" % (message_size, ) print \"array count:",
"s.send('READY') s.recv() def main(): if len (sys.argv) != 3: print",
") print \"array count: %.0f\" % (array_count, ) print \"mean",
"elapsed == 0: elapsed = 1 throughput = (1000000.0 *",
"print \"mean throughput: %.0f [msg/s]\" % (throughput, ) print \"mean",
"Done.\" end = time.clock() elapsed = (end - start) *",
"if elapsed == 0: elapsed = 1 throughput = (1000000.0",
"':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance() s",
"zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if",
"(1000000.0 * float (array_count)) / float (elapsed) message_size = a.nbytes",
"(throughput, ) print \"mean throughput: %.3f [Mb/s]\" % (megabits, )",
"- start) * 1000000 if elapsed == 0: elapsed =",
"\"Receiving arrays...\" for i in range(array_count): a = s.recv_pyobj() print",
"print 'array-count must be integers' sys.exit (1) ctx = zmq.Context()",
"Uses REQ/REP (on PUB/SUB socket + 1) to synchronize \"\"\"",
"to NumPy arrays. Uses REQ/REP (on PUB/SUB socket + 1)",
"+ [str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance() s =",
"file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- import",
"part of this software. #----------------------------------------------------------------------------- import sys import time import",
"(1) ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to)",
"this software. #----------------------------------------------------------------------------- import sys import time import zmq import",
"[Mb/s]\" % (megabits, ) time.sleep(1.0) if __name__ == \"__main__\": main()",
"main(): if len (sys.argv) != 3: print 'usage: subscriber <connect_to>",
"1)] ) ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY')",
"subscriber <connect_to> <array-count>' sys.exit (1) try: connect_to = sys.argv[1] array_count",
"range(array_count): a = s.recv_pyobj() print \" Done.\" end = time.clock()",
"of the New BSD License. The full license is in",
"is in # the file COPYING.BSD, distributed as part of",
"a = s.recv_pyobj() print \" Done.\" end = time.clock() elapsed",
"software. #----------------------------------------------------------------------------- import sys import time import zmq import numpy",
"(1) try: connect_to = sys.argv[1] array_count = int (sys.argv[2]) except",
"= s.recv_pyobj() print \" Done.\" end = time.clock() elapsed =",
"full license is in # the file COPYING.BSD, distributed as",
"elapsed = 1 throughput = (1000000.0 * float (array_count)) /",
"a.nbytes megabits = float (throughput * message_size * 8) /",
"= (end - start) * 1000000 if elapsed == 0:",
"+ 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) + 1)] )",
"s.recv_pyobj() print \" Done.\" end = time.clock() elapsed = (end",
"license is in # the file COPYING.BSD, distributed as part",
"% (message_size, ) print \"array count: %.0f\" % (array_count, )",
"throughput = (1000000.0 * float (array_count)) / float (elapsed) message_size",
"= time.clock() elapsed = (end - start) * 1000000 if",
"= (1000000.0 * float (array_count)) / float (elapsed) message_size =",
"print \"message size: %.0f [B]\" % (message_size, ) print \"array",
"sync(connect_to) start = time.clock() print \"Receiving arrays...\" for i in",
"that subscribes to NumPy arrays. Uses REQ/REP (on PUB/SUB socket",
"* 8) / 1000000 print \"message size: %.0f [B]\" %",
"8) / 1000000 print \"message size: %.0f [B]\" % (message_size,",
"arrays...\" for i in range(array_count): a = s.recv_pyobj() print \"",
"ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if len (sys.argv) !=",
"% (array_count, ) print \"mean throughput: %.0f [msg/s]\" % (throughput,",
"as part of this software. #----------------------------------------------------------------------------- import sys import time",
"'usage: subscriber <connect_to> <array-count>' sys.exit (1) try: connect_to = sys.argv[1]",
"distributed as part of this software. #----------------------------------------------------------------------------- import sys import",
"time import zmq import numpy def sync(connect_to): # use connect",
") print \"mean throughput: %.0f [msg/s]\" % (throughput, ) print",
"= 1 throughput = (1000000.0 * float (array_count)) / float",
"s.connect(sync_with) s.send('READY') s.recv() def main(): if len (sys.argv) != 3:",
"ctx = zmq.Context() s = ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start",
"array_count = int (sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count",
"time.clock() elapsed = (end - start) * 1000000 if elapsed",
"message_size = a.nbytes megabits = float (throughput * message_size *",
"socket + 1) to synchronize \"\"\" #----------------------------------------------------------------------------- # Copyright (c)",
"sync(connect_to): # use connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1]",
"\"mean throughput: %.0f [msg/s]\" % (throughput, ) print \"mean throughput:",
"under the terms of the New BSD License. The full",
"<connect_to> <array-count>' sys.exit (1) try: connect_to = sys.argv[1] array_count =",
"for i in range(array_count): a = s.recv_pyobj() print \" Done.\"",
"%.0f [B]\" % (message_size, ) print \"array count: %.0f\" %",
"<NAME> # # Distributed under the terms of the New",
"1 throughput = (1000000.0 * float (array_count)) / float (elapsed)",
"== 0: elapsed = 1 throughput = (1000000.0 * float",
"print \"mean throughput: %.3f [Mb/s]\" % (megabits, ) time.sleep(1.0) if",
"if len (sys.argv) != 3: print 'usage: subscriber <connect_to> <array-count>'",
"arrays. Uses REQ/REP (on PUB/SUB socket + 1) to synchronize",
"= sys.argv[1] array_count = int (sys.argv[2]) except (ValueError, OverflowError), e:",
"\"\"\"A test that subscribes to NumPy arrays. Uses REQ/REP (on",
"# the file COPYING.BSD, distributed as part of this software.",
"#----------------------------------------------------------------------------- import sys import time import zmq import numpy def",
"float (throughput * message_size * 8) / 1000000 print \"message",
"% (throughput, ) print \"mean throughput: %.3f [Mb/s]\" % (megabits,",
"must be integers' sys.exit (1) ctx = zmq.Context() s =",
"PUB/SUB socket + 1) to synchronize \"\"\" #----------------------------------------------------------------------------- # Copyright",
"= int (sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count must",
"(array_count)) / float (elapsed) message_size = a.nbytes megabits = float",
"import sys import time import zmq import numpy def sync(connect_to):",
"# use connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] +",
"New BSD License. The full license is in # the",
"synchronize \"\"\" #----------------------------------------------------------------------------- # Copyright (c) 2010 <NAME> # #",
"[msg/s]\" % (throughput, ) print \"mean throughput: %.3f [Mb/s]\" %",
") print \"mean throughput: %.3f [Mb/s]\" % (megabits, ) time.sleep(1.0)",
"\"\"\" #----------------------------------------------------------------------------- # Copyright (c) 2010 <NAME> # # Distributed",
"s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def main(): if len",
"s.recv() def main(): if len (sys.argv) != 3: print 'usage:",
"(on PUB/SUB socket + 1) to synchronize \"\"\" #----------------------------------------------------------------------------- #",
"(ValueError, OverflowError), e: print 'array-count must be integers' sys.exit (1)",
"1) to synchronize \"\"\" #----------------------------------------------------------------------------- # Copyright (c) 2010 <NAME>",
"in range(array_count): a = s.recv_pyobj() print \" Done.\" end =",
"[B]\" % (message_size, ) print \"array count: %.0f\" % (array_count,",
"= time.clock() print \"Receiving arrays...\" for i in range(array_count): a",
"len (sys.argv) != 3: print 'usage: subscriber <connect_to> <array-count>' sys.exit",
"* float (array_count)) / float (elapsed) message_size = a.nbytes megabits",
"+ 1)] ) ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with)",
"* 1000000 if elapsed == 0: elapsed = 1 throughput",
"message_size * 8) / 1000000 print \"message size: %.0f [B]\"",
"be integers' sys.exit (1) ctx = zmq.Context() s = ctx.socket(zmq.SUB)",
"def main(): if len (sys.argv) != 3: print 'usage: subscriber",
"the terms of the New BSD License. The full license",
"connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1]) +",
"time.clock() print \"Receiving arrays...\" for i in range(array_count): a =",
"Copyright (c) 2010 <NAME> # # Distributed under the terms",
"[str(int(connect_to.split(':')[-1]) + 1)] ) ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ)",
"<array-count>' sys.exit (1) try: connect_to = sys.argv[1] array_count = int",
"REQ/REP (on PUB/SUB socket + 1) to synchronize \"\"\" #-----------------------------------------------------------------------------",
"try: connect_to = sys.argv[1] array_count = int (sys.argv[2]) except (ValueError,",
"(end - start) * 1000000 if elapsed == 0: elapsed",
"ctx = zmq.Context.instance() s = ctx.socket(zmq.REQ) s.connect(sync_with) s.send('READY') s.recv() def",
"(sys.argv[2]) except (ValueError, OverflowError), e: print 'array-count must be integers'",
"sys import time import zmq import numpy def sync(connect_to): #",
"elapsed = (end - start) * 1000000 if elapsed ==",
"NumPy arrays. Uses REQ/REP (on PUB/SUB socket + 1) to",
"import time import zmq import numpy def sync(connect_to): # use",
"Distributed under the terms of the New BSD License. The",
"use connect socket + 1 sync_with = ':'.join(connect_to.split(':')[:-1] + [str(int(connect_to.split(':')[-1])",
"float (array_count)) / float (elapsed) message_size = a.nbytes megabits =",
"= a.nbytes megabits = float (throughput * message_size * 8)",
"1000000 print \"message size: %.0f [B]\" % (message_size, ) print",
"count: %.0f\" % (array_count, ) print \"mean throughput: %.0f [msg/s]\"",
"ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print \"Receiving arrays...\"",
"(message_size, ) print \"array count: %.0f\" % (array_count, ) print",
"sys.exit (1) try: connect_to = sys.argv[1] array_count = int (sys.argv[2])",
"\" Done.\" end = time.clock() elapsed = (end - start)",
"= ctx.socket(zmq.SUB) s.connect(connect_to) s.setsockopt(zmq.SUBSCRIBE,'') sync(connect_to) start = time.clock() print \"Receiving",
"* message_size * 8) / 1000000 print \"message size: %.0f",
"#----------------------------------------------------------------------------- # Copyright (c) 2010 <NAME> # # Distributed under",
"zmq import numpy def sync(connect_to): # use connect socket +"
"the fulltext search extension con.execute(\"select load_extension('./fts3.so')\") # alternatively you can",
"extension loading con.enable_load_extension(True) # Load the fulltext search extension con.execute(\"select",
"pie', 'broccoli cheese onions flour'); insert into recipe (name, ingredients)",
"con.executescript(\"\"\" insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli",
"sqlite3.connect(\":memory:\") # enable extension loading con.enable_load_extension(True) # Load the fulltext",
"load the extension using an API call: # con.load_extension(\"./fts3.so\") #",
"insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese",
"# example from SQLite wiki con.execute(\"create virtual table recipe using",
"values ('broccoli stew', 'broccoli peppers cheese tomatoes'); insert into recipe",
"('pumpkin pie', 'pumpkin sugar flour butter'); \"\"\") for row in",
"recipe using fts3(name, ingredients)\") con.executescript(\"\"\" insert into recipe (name, ingredients)",
"stew', 'pumpkin onions garlic celery'); insert into recipe (name, ingredients)",
"fts3(name, ingredients)\") con.executescript(\"\"\" insert into recipe (name, ingredients) values ('broccoli",
"con.enable_load_extension(False) # example from SQLite wiki con.execute(\"create virtual table recipe",
"tomatoes'); insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin",
"disable extension loading again con.enable_load_extension(False) # example from SQLite wiki",
"flour'); insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin",
"('broccoli stew', 'broccoli peppers cheese tomatoes'); insert into recipe (name,",
"from SQLite wiki con.execute(\"create virtual table recipe using fts3(name, ingredients)\")",
"alternatively you can load the extension using an API call:",
"insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar",
"'broccoli peppers cheese tomatoes'); insert into recipe (name, ingredients) values",
"# con.load_extension(\"./fts3.so\") # disable extension loading again con.enable_load_extension(False) # example",
"wiki con.execute(\"create virtual table recipe using fts3(name, ingredients)\") con.executescript(\"\"\" insert",
"import sqlite3 con = sqlite3.connect(\":memory:\") # enable extension loading con.enable_load_extension(True)",
"for row in con.execute(\"select rowid, name, ingredients from recipe where",
"search extension con.execute(\"select load_extension('./fts3.so')\") # alternatively you can load the",
"(name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter'); \"\"\")",
"con = sqlite3.connect(\":memory:\") # enable extension loading con.enable_load_extension(True) # Load",
"celery'); insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli",
"(name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour'); insert",
"(name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes'); insert",
"API call: # con.load_extension(\"./fts3.so\") # disable extension loading again con.enable_load_extension(False)",
"'pumpkin sugar flour butter'); \"\"\") for row in con.execute(\"select rowid,",
"in con.execute(\"select rowid, name, ingredients from recipe where name match",
"recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');",
"loading again con.enable_load_extension(False) # example from SQLite wiki con.execute(\"create virtual",
"values ('pumpkin stew', 'pumpkin onions garlic celery'); insert into recipe",
"butter'); \"\"\") for row in con.execute(\"select rowid, name, ingredients from",
"insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers",
"table recipe using fts3(name, ingredients)\") con.executescript(\"\"\" insert into recipe (name,",
"an API call: # con.load_extension(\"./fts3.so\") # disable extension loading again",
"= sqlite3.connect(\":memory:\") # enable extension loading con.enable_load_extension(True) # Load the",
"cheese tomatoes'); insert into recipe (name, ingredients) values ('pumpkin stew',",
"the extension using an API call: # con.load_extension(\"./fts3.so\") # disable",
"'broccoli cheese onions flour'); insert into recipe (name, ingredients) values",
"into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions",
"# alternatively you can load the extension using an API",
"sqlite3 con = sqlite3.connect(\":memory:\") # enable extension loading con.enable_load_extension(True) #",
"fulltext search extension con.execute(\"select load_extension('./fts3.so')\") # alternatively you can load",
"('broccoli pie', 'broccoli cheese onions flour'); insert into recipe (name,",
"recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');",
"cheese onions flour'); insert into recipe (name, ingredients) values ('pumpkin",
"row in con.execute(\"select rowid, name, ingredients from recipe where name",
"load_extension('./fts3.so')\") # alternatively you can load the extension using an",
"Load the fulltext search extension con.execute(\"select load_extension('./fts3.so')\") # alternatively you",
"enable extension loading con.enable_load_extension(True) # Load the fulltext search extension",
"SQLite wiki con.execute(\"create virtual table recipe using fts3(name, ingredients)\") con.executescript(\"\"\"",
"con.execute(\"create virtual table recipe using fts3(name, ingredients)\") con.executescript(\"\"\" insert into",
"# Load the fulltext search extension con.execute(\"select load_extension('./fts3.so')\") # alternatively",
"loading con.enable_load_extension(True) # Load the fulltext search extension con.execute(\"select load_extension('./fts3.so')\")",
"recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');",
"recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');",
"using fts3(name, ingredients)\") con.executescript(\"\"\" insert into recipe (name, ingredients) values",
"into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour",
"extension con.execute(\"select load_extension('./fts3.so')\") # alternatively you can load the extension",
"extension loading again con.enable_load_extension(False) # example from SQLite wiki con.execute(\"create",
"again con.enable_load_extension(False) # example from SQLite wiki con.execute(\"create virtual table",
"flour butter'); \"\"\") for row in con.execute(\"select rowid, name, ingredients",
"con.load_extension(\"./fts3.so\") # disable extension loading again con.enable_load_extension(False) # example from",
"example from SQLite wiki con.execute(\"create virtual table recipe using fts3(name,",
"garlic celery'); insert into recipe (name, ingredients) values ('broccoli pie',",
"peppers cheese tomatoes'); insert into recipe (name, ingredients) values ('pumpkin",
"con.execute(\"select rowid, name, ingredients from recipe where name match 'pie'\"):",
"virtual table recipe using fts3(name, ingredients)\") con.executescript(\"\"\" insert into recipe",
"('pumpkin stew', 'pumpkin onions garlic celery'); insert into recipe (name,",
"call: # con.load_extension(\"./fts3.so\") # disable extension loading again con.enable_load_extension(False) #",
"rowid, name, ingredients from recipe where name match 'pie'\"): print(row)",
"can load the extension using an API call: # con.load_extension(\"./fts3.so\")",
"values ('pumpkin pie', 'pumpkin sugar flour butter'); \"\"\") for row",
"\"\"\") for row in con.execute(\"select rowid, name, ingredients from recipe",
"# enable extension loading con.enable_load_extension(True) # Load the fulltext search",
"ingredients) values ('broccoli pie', 'broccoli cheese onions flour'); insert into",
"ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery'); insert into",
"pie', 'pumpkin sugar flour butter'); \"\"\") for row in con.execute(\"select",
"ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter'); \"\"\") for",
"ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes'); insert into",
"extension using an API call: # con.load_extension(\"./fts3.so\") # disable extension",
"ingredients)\") con.executescript(\"\"\" insert into recipe (name, ingredients) values ('broccoli stew',",
"into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese",
"(name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery'); insert",
"into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic",
"'pumpkin onions garlic celery'); insert into recipe (name, ingredients) values",
"onions flour'); insert into recipe (name, ingredients) values ('pumpkin pie',",
"using an API call: # con.load_extension(\"./fts3.so\") # disable extension loading",
"stew', 'broccoli peppers cheese tomatoes'); insert into recipe (name, ingredients)",
"con.enable_load_extension(True) # Load the fulltext search extension con.execute(\"select load_extension('./fts3.so')\") #",
"insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions",
"sugar flour butter'); \"\"\") for row in con.execute(\"select rowid, name,",
"# disable extension loading again con.enable_load_extension(False) # example from SQLite",
"you can load the extension using an API call: #",
"values ('broccoli pie', 'broccoli cheese onions flour'); insert into recipe",
"onions garlic celery'); insert into recipe (name, ingredients) values ('broccoli",
"con.execute(\"select load_extension('./fts3.so')\") # alternatively you can load the extension using"
"inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets cluster params. Args:",
"with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs), name=name + '/GuaranteeConst')",
"ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.') if subgraph_filter and",
"in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not in subgraphs: tf.logging.info('Skip",
"= cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options) and device_options.var_options ==",
"2.0 (the \"License\"); # you may not use this file",
"freeze_defaults: Default initializes the graph and freeze. Useful for early",
"this assets registered from # TextFileInitializer. assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS)",
"freeze_checkpoint: The checkpoint to load. Loads and freezes the model",
"1 p.tpus_per_replica = 1 if IsTpu(device_options) else 0 p.gpus_per_replica =",
"'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None",
"single-task models. device_options: Device options for the accelerator used for",
"= tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying collection %s', key)",
"export_path: with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod",
"tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def",
"out how much we need to specify here in terms",
"in the graph, so this op has to be #",
"for inference writing purposes. Returns: InferenceGraph proto. Raises: ValueError: if",
"model_cfg: a Params instance as returned by model_registry.GetParams(modelname, 'Test') or",
"constants with AS_CONSTANTS. # gen_init_op: Whether to serialize initialization ops",
"for asset in assets_collection: if asset.op.type == 'Const' and asset.op.get_attr(",
"not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying",
"@classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True,",
"weight-sharing and multi-core inference on TPUs work properly. Args: model_cfg:",
"Device to infer on. # retain_device_placement: If true, the specified",
"tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class",
"not device_options.retain_device_placement: # Clear the device so that the runtime",
"ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags",
"will be cleared, so that the runtime can choose automatically.",
"cannot be used with device ' + device_options.device) if freeze_checkpoint:",
"_FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): \"\"\"Freezes a graph from a checkpoint.",
"constants. device = '' tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device),",
"'tpu' try: mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore =",
"collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs",
"tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op",
"tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override',",
"in graph_def.node: node.ClearField('device') for function in graph_def.library.function: for node_def in",
"= False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks variable",
"TensorFlow will raise a ValueError with # a description of",
"try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not in the graph.",
"variable with-in scope.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device =",
"graph.get_operations(): if preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes",
"if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu' try: mdl",
"import lingvo.compat as tf from lingvo.core import base_model from lingvo.core",
"Configure the model. model_cfg.random_seed = random_seed model_cfg.is_inference = True if",
"subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue # Sometimes feeds aren't",
"Options on handling variables. For TPUs, variables can be #",
"in the feeds/fetches (depends # on how it is used).",
"device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be'",
"errors. for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes:",
"License for the specific language governing permissions and # limitations",
"model params.\"\"\" import collections import contextlib import re import lingvo.compat",
"op name of the given node name.\"\"\" # Tensor names",
"to sanity check (versus relying on the text manipulation). #",
"FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo): Work out how much",
"import six from google.protobuf import text_format FLAGS = tf.flags.FLAGS #",
"Device options for the accelerator used for serving. freeze_checkpoint: The",
"saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not in the",
"Reserved. # # Licensed under the Apache License, Version 2.0",
"proto from model params.\"\"\" import collections import contextlib import re",
"not preserve_extra_ops: return sorted(list(output_op_names)) # We also need to preserve",
"Reset TPU-related flags after model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device",
"graph. inference_graph_proto: an InferenceGraph proto. subgraphs: an optional list of",
"the inference graph in ASCII to this path. subgraph_filter: A",
"else: saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables')",
"# tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for",
"graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device",
"to any outputs but keep them in the graph #",
"continue # Sometimes feeds aren't connected to any outputs but",
"for the accelerator used for serving. freeze_checkpoint: The checkpoint to",
"if constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add()",
"inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return",
"E.g., a node may have this attr: # attr {",
"The checkpoint to restore. output_op_names: Names of output ops. Returns:",
"A string or a list of subgraph names. If not",
"============================================================================== \"\"\"Utility for exporting an InferenceGraph proto from model params.\"\"\"",
"_DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False",
"they present in the graph. Returns: Array of tf op",
"_CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks variable as constants for",
"= 'decoder' cluster_params.add_summary = False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker)",
"AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict):",
"inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are bad, however this seems",
"node may have this attr: # attr { # key:",
"to the graph. # Tables can be declared anywhere in",
"1 if IsTpu(device_options) else 0 p.gpus_per_replica = 0 p.devices_per_split =",
"saver: The tf.Saver to use for restoration. checkpoint: The checkpoint",
"op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s",
"# the graph to sanity check (versus relying on the",
"%s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults:",
"= graph.as_graph_def() tf.logging.info('Pruning graph to output ops: %r', output_op_names) graph_def",
"to output ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if",
"this op has to be # added last. tf.tables_initializer(name='init_all_tables') finally:",
"freeze_defaults: tf.logging.info('Default initializing graph and freezing.') graph_def = _FreezeDefaults(graph, output_op_names)",
"python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved.",
"not both be' 'set.') if subgraph_filter and not isinstance(subgraph_filter, (tuple,",
"manipulation). # If this logic ever breaks, TensorFlow will raise",
"DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta)",
"None} named_feeds = {k: v.name for k, v in feeds.items()",
"= False FLAGS.xla_device = 'tpu' try: mdl = model_cfg.Instantiate() task",
"of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model.",
"return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name,",
"return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets",
"for activations and # weights in the model. Options supported",
"used with device ' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph",
"OF ANY KIND, either express or implied. # See the",
"to generate an inference graph for. Should be None for",
"the License. # ============================================================================== \"\"\"Utility for exporting an InferenceGraph proto",
"See the License for the specific language governing permissions and",
"get this assets registered from # TextFileInitializer. assets_collection = tf.compat.v1.get_collection(",
"support restoring, we have to not prune out the restore",
"to in writing, software # distributed under the License is",
"and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults: # Replace",
"as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: \"\"\"Class",
"or agreed to in writing, software # distributed under the",
"a Python bool, default to False. Preserves nodes for restoring",
"graph referencing types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity)",
"# constants with AS_CONSTANTS. # gen_init_op: Whether to serialize initialization",
"subgraph_filter = [subgraph_filter] # Disable assertions unless user explicitly enables",
"'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params,",
"To support restoring, we have to not prune out the",
"= True if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and",
"task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph",
"if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')):",
"for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: #",
"# } # } # # In this case, we",
"compliance with the License. # You may obtain a copy",
"All Rights Reserved. # # Licensed under the Apache License,",
"= old_val var_scope.set_caching_device(old_caching_device) # Marks variable as constants for compilation",
"output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in",
"device ' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint:",
"tools without having a checkpoint. export_path: If not None, write",
"a Params instance as returned by model_registry.GetParams(modelname, 'Test') or model_params.Model().",
"we need. # To support restoring, we have to not",
"ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict",
"# TextFileInitializer. assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection:",
"seems to be the # easiest way to get this",
"the op name of the given node name.\"\"\" # Tensor",
"user explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False #",
"restore. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. \"\"\"",
"of downstream tools without having a checkpoint. export_path: If not",
"IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to",
"and freezes it. Args: graph: tf.Graph. output_op_names: Names of output",
"graph. Ignoring.' % op_name) if not preserve_colocation_nodes and not preserve_extra_ops:",
"cluster params `p`.\"\"\" p.name = '/job:localhost' p.replicas = 1 p.tpus_per_replica",
"tf.GraphDef. \"\"\" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants(",
"not use this file except in compliance with the License.",
"if not device_options.retain_device_placement: # Clear the device so that the",
"raise a ValueError with # a description of the syntax",
"line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) # Instantiate the graph. graph",
"FLAGS.enable_asserts to False unless user explicitly sets it to True.",
"ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement:",
"constants.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device",
"def _FreezeDefaults(graph, output_op_names): \"\"\"Default initializes a graph and freezes it.",
"params. Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. \"\"\" def Update(p):",
"you may not use this file except in compliance with",
"fetches = tensors[0] feeds = tensors[1] # Rewrite fetches and",
"if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in",
"under the License. # ============================================================================== \"\"\"Utility for exporting an InferenceGraph",
"# } # # In this case, we need to",
"instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops:",
"ops in the feeds/fetches (depends # on how it is",
"for node in graph.get_operations(): if preserve_extra_ops and node.name in preserve_extra_ops:",
"else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore))",
"all variables under this scope as constants.\"\"\" global _CONST_GUARANTEE var_scope",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"from these subgraphs are preserved. Otherwise, all subgraphs are included.",
"are None or tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement',",
"extra op names to preserve as long as they present",
"= [node.name for node in graph_def.node] for node in graph.get_operations():",
"# retain_device_placement: If true, the specified device in the generated",
"True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def _DeviceSupportsFreezing(cls,",
"in which case this should be # turned off to",
"should be preserved in the graph. \"\"\" output_op_names = set()",
"marking them as # constants. device = '' tpu_const_scope =",
"TODO(zhifengc): It's possible that it's better to fix in #",
"defs ' 'since operating in eager mode.') # Freezing. if",
"0 p.devices_per_split = 1 cluster_params.mode = 'sync' cluster_params.job = 'decoder'",
"instance. named_fetches = {k: v.name for k, v in fetches.items()",
"ops in the returned array. preserve_saver_restore_nodes: a Python bool, default",
"for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path,",
"%s.', subgraph_name) continue # Sometimes feeds aren't connected to any",
"The checkpoint to load. Loads and freezes the model if",
"layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto =",
"= {} for v in variables: vars_dict[_GetVarName(v)] = v return",
"tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls,",
"_DummyScope(): yield None def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns",
"freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph and freezing.') graph_def",
"purposes. Returns: InferenceGraph proto. Raises: ValueError: if the model does",
"subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict of (fetches, feeds) where",
"colocating with the closure of output ops in the returned",
"whether param_name is contained in param_obj.\"\"\" if not param_obj: return",
"and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to output",
"be reached from the pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names))",
"pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): \"\"\"Tests",
"how it is used). We differentiate here. We still do",
"= ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options)",
"and freezes the model if given. freeze_defaults: Default initializes the",
"The tf graph. inference_graph_proto: an InferenceGraph proto. subgraphs: an optional",
"for variables if we are marking them as # constants.",
"subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a",
"ops. Returns: Resulting tf.GraphDef. \"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:",
"= _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True yield",
"param_obj.\"\"\" if not param_obj: return for k, _ in param_obj.IterParams():",
"-> tf.Variable() mapping.\"\"\" vars_dict = {} for v in variables:",
"because it's only used for saving. saver_def = inference_graph_proto.saver_def for",
"subgraph names. If provided, only output ops from these subgraphs",
"Update(p): \"\"\"Update cluster params `p`.\"\"\" p.name = '/job:localhost' p.replicas =",
"asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def =",
"name -> tf.Variable() mapping.\"\"\" vars_dict = {} for v in",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used with device '",
"dict of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict of (fetches,",
"generate an inference graph for. Should be None for single-task",
"collection defs ' 'since operating in eager mode.') # Freezing.",
"an inference graph. Args: graph: The tf graph. inference_graph_proto: an",
"restoring, we have to not prune out the restore node.",
"a dictionary with name -> tf.Variable() mapping.\"\"\" vars_dict = {}",
"asset.name asset_file_def.filename = constant_value.string_val[0] # Add a table init op",
"initializing graph and freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def())",
"cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model. model_cfg.random_seed = random_seed model_cfg.is_inference",
"Returns: Resulting tf.GraphDef. \"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables'))",
"k == param_name: return True return False def _FreezeGraphFromCheckpoint(graph, saver,",
"lingvo.core import py_utils import six from google.protobuf import text_format FLAGS",
"device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults: # Replace variables",
"in the exported inference graph. disable_packed_input: Disable packed input for",
"to avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name))",
"output ops from these subgraphs are preserved. Otherwise, all subgraphs",
"_, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:',",
"ValueError('freeze_checkpoint cannot be used with device ' + device_options.device) if",
"op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device)",
"an optional list of extra op names to preserve as",
"random_seed=None, disable_packed_input=True): \"\"\"Exports a InferenceGraph proto with piecewise subgraphs. Sets",
"the text manipulation). # If this logic ever breaks, TensorFlow",
"(_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls,",
"of output ops. Returns: Resulting tf.GraphDef. \"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig())",
"file except in compliance with the License. # You may",
"saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if",
"defs if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key in",
"variable init op to the graph. # Tables can be",
"+ device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint)",
"restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an optional list of extra",
"Whether to serialize initialization ops for the device. For TPUs,",
"an InferenceGraph proto. subgraphs: an optional list of subgraph names.",
"restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op:",
"Otherwise, the specified device # will be cleared, so that",
"tf.logging.info('Clearing device placement for: %s', device_options.device) for node in graph_def.node:",
"Whether to override the dtype to use for activations and",
"feeds to map to their tensor name instead of #",
"return for k, _ in param_obj.IterParams(): if k == param_name:",
"GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output op names",
"cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. \"\"\" def Update(p): \"\"\"Update cluster",
"lingvo.compat as tf from lingvo.core import base_model from lingvo.core import",
"this path. subgraph_filter: A string or a list of subgraph",
"types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint:",
"def _ParamExists(param_obj, param_name): \"\"\"Tests whether param_name is contained in param_obj.\"\"\"",
"graph.get_operations()]) # Collection defs if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph)",
"tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op')",
"lookup in # the graph to sanity check (versus relying",
"the device. For TPUs, # servers can be initialized globally",
"old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device)",
"loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name",
"not prune out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all')",
"InferenceGraphExporter: \"\"\"Class for exporting inference graphs.\"\"\" @classmethod def Export(cls, model_cfg,",
"subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter or name in subgraph_filter:",
"a Python bool, default to True. Preserves nodes colocating with",
"_CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE",
"op names from an inference graph. Args: graph: The tf",
"# servers can be initialized globally once, in which case",
"KIND, either express or implied. # See the License for",
"model. old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts",
"device # will be cleared, so that the runtime can",
"# In this case, we need to make sure the",
"def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): \"\"\"Freezes a graph from a",
"has to be # added last. tf.tables_initializer(name='init_all_tables') finally: # Reset",
"subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports a InferenceGraph proto with piecewise subgraphs.",
"to False. Preserves nodes for restoring according to inference_graph_proto.saver_def. preserve_extra_ops:",
"= random_seed model_cfg.is_inference = True if disable_packed_input: def _DisablePackedInput(task): if",
"inference_graph_proto) # Prune the graph to just the parts we",
"constant_value.string_val[0] # Add a table init op and global variable",
"preserve any nodes that are used for colocation. # E.g.,",
"# Freezing. if freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames( graph,",
"subgraph_filter and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter] #",
"(the \"License\"); # you may not use this file except",
"IsTpu(device_options) else 0 p.gpus_per_replica = 0 p.devices_per_split = 1 cluster_params.mode",
"as constants for compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global",
"tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): \"\"\"Default initializes a",
"= [subgraph_filter] # Disable assertions unless user explicitly enables it.",
"# Marks variable as constants for compilation def MaybeGuaranteeConstGetter(getter, name,",
"the graph referencing types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta =",
"in the graph # anyways to avoid errors. for tensor_or_op_name",
"and '_class' in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc =",
"= six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:] if loc_name",
"disable_packed_input: Disable packed input for inference writing purposes. Returns: InferenceGraph",
"returned by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The task to",
"tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection",
"list of extra op names to preserve as long as",
"ever breaks, TensorFlow will raise a ValueError with # a",
"# # Unless required by applicable law or agreed to",
"assets registered from # TextFileInitializer. assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for",
"'_class' in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc,",
"a list of subgraph names. If not None or empty,",
"model. model_cfg.random_seed = random_seed model_cfg.is_inference = True if disable_packed_input: def",
"return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of subgraphs/feeds/fetches",
"only this list of inference subgraphs. random_seed: Fixes the random",
"= graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name",
"t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph",
"be cleared, so that the runtime can choose automatically. #",
"graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for node in",
"preserve as long as they present in the graph. Returns:",
"'Test') or model_params.Model(). model_task_name: The task to generate an inference",
"graph to output ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names)",
"Copyright 2018 The TensorFlow Authors. All Rights Reserved. # #",
"global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args,",
"implied. # See the License for the specific language governing",
"name, subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter or name in",
"\"\"\"Utility for exporting an InferenceGraph proto from model params.\"\"\" import",
"NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing variable with-in scope.\"\"\" global _CONST_GUARANTEE var_scope",
"and node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in",
"Disable assertions unless user explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts",
"return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): \"\"\"Tests whether param_name is contained",
"list { # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } # } #",
"specify here in terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options)",
"= var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield",
"# s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } # } # } #",
"in graph_def.node] for node in graph.get_operations(): if preserve_extra_ops and node.name",
"device in the generated # inference graph nodes will be",
"const gauranteeing variable with-in scope.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope()",
"def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict:",
"'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope():",
"we need to make sure the node # inference/embedding_lookup/Read/ReadVariableOp is",
"testing of downstream tools without having a checkpoint. export_path: If",
"cleared, so that the runtime can choose automatically. # var_options:",
"if we are marking them as # constants. device =",
"= loc[5:] if loc_name not in reachable_vars: # Skip nodes",
"None for single-task models. device_options: Device options for the accelerator",
"gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports",
"easiest way to get this assets registered from # TextFileInitializer.",
"for v in variables: vars_dict[_GetVarName(v)] = v return vars_dict def",
"to override the dtype to use for activations and #",
"with # a description of the syntax of each. if",
"init op to the graph. # Tables can be declared",
"Unless required by applicable law or agreed to in writing,",
"{} for v in variables: vars_dict[_GetVarName(v)] = v return vars_dict",
"layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto,",
"isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter] # Disable assertions unless",
"'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:] if loc_name not in",
"the specific language governing permissions and # limitations under the",
"IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu' try: mdl =",
"AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto",
"placement for: %s', device_options.device) for node in graph_def.node: node.ClearField('device') for",
"to just the parts we need. # To support restoring,",
"bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)",
"with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def",
"# weights in the model. Options supported are None or",
"here. We still do the lookup in # the graph",
"the given node name.\"\"\" # Tensor names have format <op_name>:<output_index>.",
"op in graph.get_operations()]) # Collection defs if not tf.executing_eagerly(): meta_graph",
"be used with device ' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing",
"import re import lingvo.compat as tf from lingvo.core import base_model",
"InferenceGraph proto. Raises: ValueError: if the model does not support",
"to use for activations and # weights in the model.",
"by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The task to generate",
"class InferenceGraphExporter: \"\"\"Class for exporting inference graphs.\"\"\" @classmethod def Export(cls,",
"keep them in the graph # anyways to avoid errors.",
"so this op has to be # added last. tf.tables_initializer(name='init_all_tables')",
"accelerator used for serving. freeze_checkpoint: The checkpoint to load. Loads",
"that it's better to fix in # tf.graph_util.extract_sub_graph. graph_def =",
"k, _ in param_obj.IterParams(): if k == param_name: return True",
"# Replace variables with tensors using tf.identity in theta before",
"= GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint",
"# Rewrite fetches and feeds to map to their tensor",
"var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) #",
"tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: # Clear the device so",
"the graph, so this op has to be # added",
"variables can be # either placed on device through 'ON_DEVICE'",
"default to False. Preserves nodes for restoring according to inference_graph_proto.saver_def.",
"tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg,",
"Sets FLAGS.enable_asserts to False unless user explicitly sets it to",
"Disable packed input for inference writing purposes. Returns: InferenceGraph proto.",
"op: op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter)",
"for serving. freeze_checkpoint: The checkpoint to load. Loads and freezes",
"inference writing purposes. Returns: InferenceGraph proto. Raises: ValueError: if the",
"p.gpus_per_replica = 0 p.devices_per_split = 1 cluster_params.mode = 'sync' cluster_params.job",
"be retained. Otherwise, the specified device # will be cleared,",
"relying on the text manipulation). # If this logic ever",
"be initialized globally once, in which case this should be",
"explicitly sets it to True. Note: Enable FLAGS.pin_vars_to_cpu (default false)",
"in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8')",
"to be the # easiest way to get this assets",
"be None for single-task models. device_options: Device options for the",
"however this seems to be the # easiest way to",
"for op in graph.get_operations()]) # Collection defs if not tf.executing_eagerly():",
"model. Options supported are None or tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions',",
"It's possible that it's better to fix in # tf.graph_util.extract_sub_graph.",
"FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains options to configure inference",
"and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if (_ParamExists(task, 'decoder') and",
"p.replicas = 1 p.tpus_per_replica = 1 if IsTpu(device_options) else 0",
"have format <op_name>:<output_index>. Some inference # graphs put tensors and",
"False FLAGS.xla_device = 'tpu' try: mdl = model_cfg.Instantiate() task =",
"vars_dict[_GetVarName(v)] = v return vars_dict def IsTpu(device_options): return device_options.device ==",
"device_options: InferenceDeviceOptions. \"\"\" def Update(p): \"\"\"Update cluster params `p`.\"\"\" p.name",
"= _DummyScope() if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): # Do",
"graph.as_graph_def() tf.logging.info('Pruning graph to output ops: %r', output_op_names) graph_def =",
"== tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict):",
"tf from lingvo.core import base_model from lingvo.core import bfloat16_variables from",
"for restoration. checkpoint: The checkpoint to restore. output_op_names: Names of",
"list(output_op_names)) reachable_vars = [node.name for node in graph_def.node] for node",
"inference_graph_proto.saver_def. preserve_extra_ops: an optional list of extra op names to",
"import contextlib import re import lingvo.compat as tf from lingvo.core",
"in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are bad, however",
"# Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph,",
"# inference graph nodes will be retained. Otherwise, the specified",
"supported are None or tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device',",
"provided, only output ops from these subgraphs are preserved. Otherwise,",
"_DummyScope() if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): # Do not",
"Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing and multi-core",
"# key: \"_class\" # value { # list { #",
"Lint as: python3 # Copyright 2018 The TensorFlow Authors. All",
"to map to their tensor name instead of # Tensor",
"= var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE",
"= tensors[0] feeds = tensors[1] # Rewrite fetches and feeds",
"and feeds to map to their tensor name instead of",
"_MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec =",
"be # added last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags",
"name, *args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return",
"asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0] #",
"declared anywhere in the graph, so this op has to",
"subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output op names from an",
"graph.as_graph_def(), output_op_names) class InferenceGraphExporter: \"\"\"Class for exporting inference graphs.\"\"\" @classmethod",
"preserved in the graph. \"\"\" output_op_names = set() def _GetOpName(tensor_or_op_name):",
"You may obtain a copy of the License at #",
"k, v in fetches.items() if v is not None} named_feeds",
"py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags prior to instantiating model.",
"and # weights in the model. Options supported are None",
"exporting collection defs ' 'since operating in eager mode.') #",
"Otherwise, all subgraphs are included. preserve_colocation_nodes: a Python bool, default",
"on handling variables. For TPUs, variables can be # either",
"\"\"\"Gets output op names from an inference graph. Args: graph:",
"subgraph names. If not None or empty, export only this",
"to True. Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing",
"= 'tpu' try: mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore",
"graph. # Tables can be declared anywhere in the graph,",
"is contained in param_obj.\"\"\" if not param_obj: return for k,",
"inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto)",
"Array of tf op names that should be preserved in",
"how much we need to specify here in terms of",
"1 cluster_params.mode = 'sync' cluster_params.job = 'decoder' cluster_params.add_summary = False",
"make sure the node # inference/embedding_lookup/Read/ReadVariableOp is not pruned. #",
"tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for node",
"# Only nodes for restoring is preserved. saver_def.save_tensor_name is #",
"for name, subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter or name",
"var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None def _GetVarName(v): return v.name[:-len(':0')]",
"node in graph_def.node] for node in graph.get_operations(): if preserve_extra_ops and",
"ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override ==",
"tf.guarantee_const( getter(name, *args, **kwargs), name=name + '/GuaranteeConst') else: return getter(name,",
"{k: v.name for k, v in feeds.items() if v is",
"MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None):",
"if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint) graph_def =",
"graph and freeze. Useful for early testing of downstream tools",
"inference graph in ASCII to this path. subgraph_filter: A string",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"inference_graph_pb2 from lingvo.core import py_utils import six from google.protobuf import",
"@contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats all variables under this scope as",
"init op and global variable init op to the graph.",
"# value { # list { # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" #",
"a ValueError with # a description of the syntax of",
"== tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of subgraphs/feeds/fetches to InferenceGraph.",
"tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for node in graph_def.node] for",
"= None @contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing variable with-in",
"False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks variable as",
"multi-core inference on TPUs work properly. Args: model_cfg: a Params",
"if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if",
"put ops in the feeds/fetches (depends # on how it",
"the pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name):",
"for the device. For TPUs, # servers can be initialized",
"a table init op and global variable init op to",
"_CONST_GUARANTEE = True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager",
"return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16",
"# var_options: Options on handling variables. For TPUs, variables can",
"can not both be' 'set.') if subgraph_filter and not isinstance(subgraph_filter,",
"+ '/GuaranteeConst') else: return getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope():",
"v.name for k, v in feeds.items() if v is not",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"License. # You may obtain a copy of the License",
"without having a checkpoint. export_path: If not None, write the",
"*args, **kwargs), name=name + '/GuaranteeConst') else: return getter(name, *args, **kwargs)",
"the listed subgraphs. \"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and",
"GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot",
"preserve_saver_restore_nodes: a Python bool, default to False. Preserves nodes for",
"# ============================================================================== \"\"\"Utility for exporting an InferenceGraph proto from model",
"need to preserve any nodes that are used for colocation.",
"'set.') if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter =",
"\"\"\" def Update(p): \"\"\"Update cluster params `p`.\"\"\" p.name = '/job:localhost'",
"= ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override",
"vars_dict = {} for v in variables: vars_dict[_GetVarName(v)] = v",
"case this should be # turned off to avoid tripping",
"better to fix in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names))",
"each fetches/feeds is a NestedMap. Returns: Equivalent InferenceGraph. \"\"\" #",
"before # freezing to avoid the graph referencing types of",
"**kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats all variables under this scope",
"not None} named_feeds = {k: v.name for k, v in",
"make weight-sharing and multi-core inference on TPUs work properly. Args:",
"initialized globally once, in which case this should be #",
"governing permissions and # limitations under the License. # ==============================================================================",
"Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def _DeviceSupportsFreezing(cls, device_options):",
"return getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats all variables",
"TensorFlow Authors. All Rights Reserved. # # Licensed under the",
"Preserves nodes colocating with the closure of output ops in",
"ops for the device. For TPUs, # servers can be",
"config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph,",
"once, in which case this should be # turned off",
"be' 'set.') if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter",
"constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0])",
"of subgraph names. If not None or empty, export only",
"_ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder,",
"subgraphs are included. preserve_colocation_nodes: a Python bool, default to True.",
"cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model. model_cfg.random_seed =",
"output ops. Returns: Resulting tf.GraphDef. \"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig()) as",
"text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains options to configure",
"and device_options.var_options == 'AS_CONSTANTS'): # Do not specify devices for",
"subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for",
"\"\"\"Tests whether param_name is contained in param_obj.\"\"\" if not param_obj:",
"Ignoring.' % op_name) if not preserve_colocation_nodes and not preserve_extra_ops: return",
"anyways to avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())):",
"to preserve as long as they present in the graph.",
"tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope = _DummyScope()",
"for subgraph_name, tensors in subgraphs_dict.items(): fetches = tensors[0] feeds =",
"= False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder)",
"op names to preserve as long as they present in",
"and # limitations under the License. # ============================================================================== \"\"\"Utility for",
"with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope",
"= set() def _GetOpName(tensor_or_op_name): \"\"\"Returns the op name of the",
"is not pruned. # # TODO(zhifengc): It's possible that it's",
"mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema))",
"# dtype_override: Whether to override the dtype to use for",
"nodes for restoring is preserved. saver_def.save_tensor_name is # skipped because",
"InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override'",
"output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in node.node_def.attr: for loc in",
"# inference/embedding_lookup/Read/ReadVariableOp is not pruned. # # TODO(zhifengc): It's possible",
"long as they present in the graph. Returns: Array of",
"as long as they present in the graph. Returns: Array",
"is preserved. saver_def.save_tensor_name is # skipped because it's only used",
"Args: graph: tf.Graph. output_op_names: Names of output ops. Returns: Resulting",
"tf.logging.debug('Model %s params:', model_cfg.name) for line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line)",
"var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None)",
"Options supported are None or tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [",
"graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items(): fetches",
"model if given. freeze_defaults: Default initializes the graph and freeze.",
"referencing types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) #",
"on device through 'ON_DEVICE' option, or treated as # constants",
"variables_to_restore)) else: saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(),",
"If not None, write the inference graph in ASCII to",
"task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in",
"Raises: ValueError: if the model does not support the listed",
"support the listed subgraphs. \"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"to InferenceGraph. Args: subgraphs_dict: Dict of (fetches, feeds) where each",
"in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name",
"{ # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } # } # }",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"if not preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names)) # We",
"to be # added last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related",
"the generated # inference graph nodes will be retained. Otherwise,",
"\"_class\" # value { # list { # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\"",
"preserved. Otherwise, all subgraphs are included. preserve_colocation_nodes: a Python bool,",
"if loc.startswith('loc:@'): loc_name = loc[5:] if loc_name not in reachable_vars:",
"checkpoint to load. Loads and freezes the model if given.",
"in meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not",
"device_options.device) for node in graph_def.node: node.ClearField('device') for function in graph_def.library.function:",
"user explicitly sets it to True. Note: Enable FLAGS.pin_vars_to_cpu (default",
"language governing permissions and # limitations under the License. #",
"= _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE = old_val",
"it is used). We differentiate here. We still do the",
"seed in the exported inference graph. disable_packed_input: Disable packed input",
"required by applicable law or agreed to in writing, software",
"that are used for colocation. # E.g., a node may",
"unless user explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False",
"Tables can be declared anywhere in the graph, so this",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None,",
"_SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets cluster params. Args: cluster_params: Model().cluster config.",
"# Tensor names have format <op_name>:<output_index>. Some inference # graphs",
"'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow const",
"treated as # constants with AS_CONSTANTS. # gen_init_op: Whether to",
"base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not",
"import bfloat16_variables from lingvo.core import inference_graph_pb2 from lingvo.core import py_utils",
"# Prune the graph to just the parts we need.",
"= inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items(): fetches = tensors[0]",
"retained. Otherwise, the specified device # will be cleared, so",
"to restore. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef.",
"= {k: v.name for k, v in feeds.items() if v",
"preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used with",
"agreed to in writing, software # distributed under the License",
"Args: subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is",
"if preserve_saver_restore_nodes: # Only nodes for restoring is preserved. saver_def.save_tensor_name",
"Clear the device so that the runtime can choose. tf.logging.info('Clearing",
"distributed under the License is distributed on an \"AS IS\"",
"output ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not",
"tensors using tf.identity in theta before # freezing to avoid",
"_GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns a dictionary with name",
"else 0 p.gpus_per_replica = 0 p.devices_per_split = 1 cluster_params.mode =",
"in terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure",
"def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task)",
"= 1 if IsTpu(device_options) else 0 p.gpus_per_replica = 0 p.devices_per_split",
"not param_obj: return for k, _ in param_obj.IterParams(): if k",
"on TPUs work properly. Args: model_cfg: a Params instance as",
"TPUs, # servers can be initialized globally once, in which",
"not in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue # Sometimes",
"as returned by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The task",
"in theta before # freezing to avoid the graph referencing",
"v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns a dictionary with name -> tf.Variable()",
"output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise",
"_ParamExists(param_obj, param_name): \"\"\"Tests whether param_name is contained in param_obj.\"\"\" if",
"put tensors and others put ops in the feeds/fetches (depends",
"turned off to avoid tripping initialization checks. # dtype_override: Whether",
"with device ' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from",
"inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items(): fetches = tensors[0] feeds",
"import collections import contextlib import re import lingvo.compat as tf",
"ValueError: if the model does not support the listed subgraphs.",
"@contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing variable with-in scope.\"\"\" global",
"tf.flags.FLAGS # InferenceDeviceOptions contains options to configure inference on the",
"anywhere in the graph, so this op has to be",
"tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE =",
"# Sometimes feeds aren't connected to any outputs but keep",
"For TPUs, variables can be # either placed on device",
"tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if",
"saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op:",
"restoring is preserved. saver_def.save_tensor_name is # skipped because it's only",
"Preserves nodes for restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an optional",
"# attr { # key: \"_class\" # value { #",
"pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference()",
"is # skipped because it's only used for saving. saver_def",
"device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of subgraphs/feeds/fetches to",
"piecewise subgraphs. Sets FLAGS.enable_asserts to False unless user explicitly sets",
"global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device =",
"freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default",
"sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(),",
"serialize initialization ops for the device. For TPUs, # servers",
"this list of inference subgraphs. random_seed: Fixes the random seed",
"tf.Variable() mapping.\"\"\" vars_dict = {} for v in variables: vars_dict[_GetVarName(v)]",
"inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0] # Add a",
"\"\"\"Update cluster params `p`.\"\"\" p.name = '/job:localhost' p.replicas = 1",
"cluster_params, device_options): \"\"\"Sets cluster params. Args: cluster_params: Model().cluster config. device_options:",
"checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): \"\"\"Default",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"name.\"\"\" # Tensor names have format <op_name>:<output_index>. Some inference #",
"for node in graph_def.node: node.ClearField('device') for function in graph_def.library.function: for",
"the License is distributed on an \"AS IS\" BASIS, #",
"# Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op =",
"names to preserve as long as they present in the",
"added last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags after model",
"tf.Graph. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. \"\"\"",
"InferenceGraph proto from model params.\"\"\" import collections import contextlib import",
"not subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph",
"return vars_dict def IsTpu(device_options): return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options):",
"freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph,",
"device_options.retain_device_placement: # Clear the device so that the runtime can",
"for compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE if",
"reachable_vars = [node.name for node in graph_def.node] for node in",
"only used for saving. saver_def = inference_graph_proto.saver_def for op_name in",
"old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE =",
"Hard-code TPU-related flags prior to instantiating model. old_enable_asserts = FLAGS.enable_asserts",
"checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif",
"model does not support the listed subgraphs. \"\"\" assert issubclass(model_cfg.cls,",
"\"\"\"Freezes a graph from a checkpoint. Args: graph: tf.Graph. saver:",
"to instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if",
"# gen_init_op: Whether to serialize initialization ops for the device.",
"weights in the model. Options supported are None or tf.bfloat16.",
"law or agreed to in writing, software # distributed under",
"bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if",
"]) _CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing",
"if freeze_checkpoint or freeze_defaults: # Replace variables with tensors using",
"var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE",
"line) # Instantiate the graph. graph = tf.Graph() with graph.as_default():",
"loc_name not in reachable_vars: # Skip nodes that cannot be",
"\"\"\"Returns the op name of the given node name.\"\"\" #",
"None, write the inference graph in ASCII to this path.",
"work properly. Args: model_cfg: a Params instance as returned by",
"graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope =",
"sure the node # inference/embedding_lookup/Read/ReadVariableOp is not pruned. # #",
"device = '' tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope:",
"under this scope as constants.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope()",
"TODO(laurenzo): Work out how much we need to specify here",
"may obtain a copy of the License at # #",
"with cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override:",
"output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune the graph to just",
"outputs but keep them in the graph # anyways to",
"= old_xla_device tf.logging.info('Graph contains ops: %r', [op.name for op in",
"from lingvo.core import bfloat16_variables from lingvo.core import inference_graph_pb2 from lingvo.core",
"output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def =",
"may not use this file except in compliance with the",
"use for activations and # weights in the model. Options",
"a graph from a checkpoint. Args: graph: tf.Graph. saver: The",
"from a checkpoint. Args: graph: tf.Graph. saver: The tf.Saver to",
"name of the given node name.\"\"\" # Tensor names have",
"Args: model_cfg: a Params instance as returned by model_registry.GetParams(modelname, 'Test')",
"of inference subgraphs. random_seed: Fixes the random seed in the",
"name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are bad,",
"tf.logging.warning('Not exporting collection defs ' 'since operating in eager mode.')",
"this file except in compliance with the License. # You",
"yield None def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns a",
"var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE = old_val",
"= {k: v.name for k, v in fetches.items() if v",
"\"\"\"Returns a dictionary with name -> tf.Variable() mapping.\"\"\" vars_dict =",
"in graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path:",
"# graphs put tensors and others put ops in the",
"inference # graphs put tensors and others put ops in",
"# To support restoring, we have to not prune out",
"any outputs but keep them in the graph # anyways",
"graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be",
"else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph in",
"# # Licensed under the Apache License, Version 2.0 (the",
"subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items(): if not",
"= old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None def",
"model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False,",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"cluster_params.mode = 'sync' cluster_params.job = 'decoder' cluster_params.add_summary = False cluster_params.do_eval",
"graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs",
"*args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats all variables under this",
"saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names):",
"list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes for restoring is",
"if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): # Do not specify",
"op and global variable init op to the graph. #",
"InferenceGraph. \"\"\" # Build the output inference graph. inference_graph_proto =",
"True. Preserves nodes colocating with the closure of output ops",
"subgraphs. Sets FLAGS.enable_asserts to False unless user explicitly sets it",
"%s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs '",
"ops. Returns: Resulting tf.GraphDef. \"\"\" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess,",
"if given. freeze_defaults: Default initializes the graph and freeze. Useful",
"terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the",
"need to specify here in terms of # cluster configuration.",
"write the inference graph in ASCII to this path. subgraph_filter:",
"contextlib import re import lingvo.compat as tf from lingvo.core import",
"in the graph. \"\"\" output_op_names = set() def _GetOpName(tensor_or_op_name): \"\"\"Returns",
"and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.')",
"Work out how much we need to specify here in",
"but keep them in the graph # anyways to avoid",
"= _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) #",
"\"\"\" # Build the output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph()",
"the returned array. preserve_saver_restore_nodes: a Python bool, default to False.",
"meta_graph = tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying collection %s',",
"from model params.\"\"\" import collections import contextlib import re import",
"ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if",
"tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint",
"tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver,",
"or implied. # See the License for the specific language",
"cluster_params.add_summary = False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler)",
"# We also need to preserve any nodes that are",
"not in the graph. Ignoring.' % op_name) if not preserve_colocation_nodes",
"graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto)",
"layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto",
"(default false) to make weight-sharing and multi-core inference on TPUs",
"are used for colocation. # E.g., a node may have",
"nodes for restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an optional list",
"value { # list { # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # }",
"if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph",
"need to make sure the node # inference/embedding_lookup/Read/ReadVariableOp is not",
"Python bool, default to False. Preserves nodes for restoring according",
"NestedMap. Returns: Equivalent InferenceGraph. \"\"\" # Build the output inference",
"_DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s',",
"cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used with device ' +",
"asset.op.type == 'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value =",
"# Add a table init op and global variable init",
"else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune the graph",
"name=name + '/GuaranteeConst') else: return getter(name, *args, **kwargs) @contextlib.contextmanager def",
"permissions and # limitations under the License. # ============================================================================== \"\"\"Utility",
"input for inference writing purposes. Returns: InferenceGraph proto. Raises: ValueError:",
"much we need to specify here in terms of #",
"graph and freezes it. Args: graph: tf.Graph. output_op_names: Names of",
"base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model",
"cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'):",
"Prune the graph to just the parts we need. #",
"for node in graph_def.node] for node in graph.get_operations(): if preserve_extra_ops",
"preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in node.node_def.attr: for loc",
"placed on device through 'ON_DEVICE' option, or treated as #",
"output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. \"\"\" sess",
"= tf.flags.FLAGS # InferenceDeviceOptions contains options to configure inference on",
"output_op_names) if not device_options.retain_device_placement: # Clear the device so that",
"names that should be preserved in the graph. \"\"\" output_op_names",
"node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op')",
"the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and",
"We differentiate here. We still do the lookup in #",
"a checkpoint. export_path: If not None, write the inference graph",
"'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager",
"names have format <op_name>:<output_index>. Some inference # graphs put tensors",
"not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = ( bfloat16_variables",
"so that the runtime can choose automatically. # var_options: Options",
"0 p.gpus_per_replica = 0 p.devices_per_split = 1 cluster_params.mode = 'sync'",
"'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if (_ParamExists(task, 'decoder')",
"IsTpu(device_options): return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override ==",
"Only nodes for restoring is preserved. saver_def.save_tensor_name is # skipped",
"# If this logic ever breaks, TensorFlow will raise a",
"def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict",
"InferenceGraph proto with piecewise subgraphs. Sets FLAGS.enable_asserts to False unless",
"# Do not specify devices for variables if we are",
"enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo): Work",
"inference_graph_proto: an InferenceGraph proto. subgraphs: an optional list of subgraph",
"is used). We differentiate here. We still do the lookup",
"cluster params. Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. \"\"\" def",
"[subgraph_filter] # Disable assertions unless user explicitly enables it. if",
"InferenceDeviceOptions contains options to configure inference on the device. #",
"graph for. Should be None for single-task models. device_options: Device",
"early testing of downstream tools without having a checkpoint. export_path:",
"does not support the listed subgraphs. \"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel)",
"Args: graph: tf.Graph. saver: The tf.Saver to use for restoration.",
"old_val var_scope.set_caching_device(old_caching_device) # Marks variable as constants for compilation def",
"not None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto",
"( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore saver =",
"None or empty, export only this list of inference subgraphs.",
"[ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE =",
"in the returned array. preserve_saver_restore_nodes: a Python bool, default to",
"returned array. preserve_saver_restore_nodes: a Python bool, default to False. Preserves",
"TPU-related flags prior to instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device",
"'sync' cluster_params.job = 'decoder' cluster_params.add_summary = False cluster_params.do_eval = True",
"as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None,",
"# list { # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } # }",
"each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return",
"saver, checkpoint, output_op_names): \"\"\"Freezes a graph from a checkpoint. Args:",
"an InferenceGraph proto from model params.\"\"\" import collections import contextlib",
"assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if asset.op.type",
"that the runtime can choose. tf.logging.info('Clearing device placement for: %s',",
"feeds/fetches (depends # on how it is used). We differentiate",
"graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing",
"device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.') if",
"with the closure of output ops in the returned array.",
"FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu' try: mdl = model_cfg.Instantiate()",
"feeds.items() if v is not None} # Export as subgraph.",
"old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts =",
"if preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and",
"graph. Args: graph: The tf graph. inference_graph_proto: an InferenceGraph proto.",
"if IsTpu(device_options) else 0 p.gpus_per_replica = 0 p.devices_per_split = 1",
"(depends # on how it is used). We differentiate here.",
"tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name) if",
"# InferenceDeviceOptions contains options to configure inference on the device.",
"in assets_collection: if asset.op.type == 'Const' and asset.op.get_attr( 'dtype') ==",
"%s', device_options.device) for node in graph_def.node: node.ClearField('device') for function in",
"them as # constants. device = '' tpu_const_scope = ConstGuaranteeScope()",
"preserve_saver_restore_nodes: # Only nodes for restoring is preserved. saver_def.save_tensor_name is",
"will be retained. Otherwise, the specified device # will be",
"FLAGS.enable_asserts = False # TODO(laurenzo): Work out how much we",
"= model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options)",
"} # } # } # # In this case,",
"the specified device in the generated # inference graph nodes",
"# } # } # } # # In this",
"graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to output ops: %r', output_op_names)",
"from lingvo.core import base_model from lingvo.core import bfloat16_variables from lingvo.core",
"of (fetches, feeds) where each fetches/feeds is a NestedMap. Returns:",
"table init op and global variable init op to the",
"freezes it. Args: graph: tf.Graph. output_op_names: Names of output ops.",
"return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: \"\"\"Class for exporting inference",
"graph, so this op has to be # added last.",
"'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value') if",
"sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): \"\"\"Default initializes a graph",
"in writing, software # distributed under the License is distributed",
"are marking them as # constants. device = '' tpu_const_scope",
"def _MakeVariableDictionary(variables): \"\"\"Returns a dictionary with name -> tf.Variable() mapping.\"\"\"",
"where each fetches/feeds is a NestedMap. Returns: Equivalent InferenceGraph. \"\"\"",
"_CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing variable",
"for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not",
"mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides(",
"op to the graph. # Tables can be declared anywhere",
"False unless user explicitly sets it to True. Note: Enable",
"either placed on device through 'ON_DEVICE' option, or treated as",
"True return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): \"\"\"Freezes a",
"model_task_name: The task to generate an inference graph for. Should",
"tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only",
"= asset.name asset_file_def.filename = constant_value.string_val[0] # Add a table init",
"in param_obj.\"\"\" if not param_obj: return for k, _ in",
"for restoring is preserved. saver_def.save_tensor_name is # skipped because it's",
"if not param_obj: return for k, _ in param_obj.IterParams(): if",
"graph: The tf graph. inference_graph_proto: an InferenceGraph proto. subgraphs: an",
"used). We differentiate here. We still do the lookup in",
"_CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device)",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags prior to",
"License, Version 2.0 (the \"License\"); # you may not use",
"Replace variables with tensors using tf.identity in theta before #",
"for single-task models. device_options: Device options for the accelerator used",
"\"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } # } # } # # In",
"= 1 cluster_params.mode = 'sync' cluster_params.job = 'decoder' cluster_params.add_summary =",
"subgraph_name) continue # Sometimes feeds aren't connected to any outputs",
"Names of output ops. Returns: Resulting tf.GraphDef. \"\"\" sess =",
"for k, _ in param_obj.IterParams(): if k == param_name: return",
"tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts",
"path. subgraph_filter: A string or a list of subgraph names.",
"'' tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override =",
"return sorted(list(output_op_names)) # We also need to preserve any nodes",
"Python bool, default to True. Preserves nodes colocating with the",
"ValueError with # a description of the syntax of each.",
"with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names)",
"from the pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj,",
"default to True. Preserves nodes colocating with the closure of",
"the License for the specific language governing permissions and #",
"old_xla_device tf.logging.info('Graph contains ops: %r', [op.name for op in graph.get_operations()])",
"output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes for restoring is preserved.",
"device = cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options) and device_options.var_options",
"fetches and feeds to map to their tensor name instead",
"def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input =",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"the model if given. freeze_defaults: Default initializes the graph and",
"instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options):",
"sets it to True. Note: Enable FLAGS.pin_vars_to_cpu (default false) to",
"\"\"\"Converts dict of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict of",
"cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope = _DummyScope() if",
"key: \"_class\" # value { # list { # s:",
"task to generate an inference graph for. Should be None",
"= tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: # Clear the device",
"freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports a InferenceGraph proto with",
"model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) # Instantiate the graph. graph = tf.Graph()",
"in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes",
"downstream tools without having a checkpoint. export_path: If not None,",
"list)): subgraph_filter = [subgraph_filter] # Disable assertions unless user explicitly",
"Args: graph: The tf graph. inference_graph_proto: an InferenceGraph proto. subgraphs:",
"asset file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name",
"for key in meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key])",
"can choose. tf.logging.info('Clearing device placement for: %s', device_options.device) for node",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"name='tpu_init_op') if freeze_checkpoint or freeze_defaults: # Replace variables with tensors",
"op has to be # added last. tf.tables_initializer(name='init_all_tables') finally: #",
"def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns a dictionary with",
"Sometimes feeds aren't connected to any outputs but keep them",
"prune out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if",
"node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w')",
"tf op names that should be preserved in the graph.",
"saver_def.save_tensor_name is # skipped because it's only used for saving.",
"tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags after model instantiation. FLAGS.enable_asserts",
"# Hard-code TPU-related flags prior to instantiating model. old_enable_asserts =",
"may have this attr: # attr { # key: \"_class\"",
"The task to generate an inference graph for. Should be",
"if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults:",
"'since operating in eager mode.') # Freezing. if freeze_defaults or",
"in the graph. Returns: Array of tf op names that",
"except KeyError: tf.logging.info('Op/tensor %s not in the graph. Ignoring.' %",
"variables. For TPUs, variables can be # either placed on",
"# added last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags after",
"tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if asset.op.type == 'Const' and",
"this should be # turned off to avoid tripping initialization",
"act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code",
"f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets cluster",
"list of subgraph names. If provided, only output ops from",
"names. If provided, only output ops from these subgraphs are",
"tf.identity in theta before # freezing to avoid the graph",
"it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo): Work out",
"# distributed under the License is distributed on an \"AS",
"graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): \"\"\"Tests whether",
"# Unless required by applicable law or agreed to in",
"tf.bfloat16) # Hard-code TPU-related flags prior to instantiating model. old_enable_asserts",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"_FreezeDefaults(graph, output_op_names): \"\"\"Default initializes a graph and freezes it. Args:",
"\"\"\"Exports a InferenceGraph proto with piecewise subgraphs. Sets FLAGS.enable_asserts to",
"as # constants. device = '' tpu_const_scope = ConstGuaranteeScope() with",
"as # constants with AS_CONSTANTS. # gen_init_op: Whether to serialize",
"device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def",
"'decoder' cluster_params.add_summary = False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps)",
"FLAGS.pin_vars_to_cpu (default false) to make weight-sharing and multi-core inference on",
"} # } # # In this case, we need",
"graph to sanity check (versus relying on the text manipulation).",
"it. Args: graph: tf.Graph. output_op_names: Names of output ops. Returns:",
"not isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter] # Disable assertions",
"TextFileInitializer. assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if",
"contained in param_obj.\"\"\" if not param_obj: return for k, _",
"bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec =",
"raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.') if subgraph_filter",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"tf.logging.info('Pruning graph to output ops: %r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def,",
"to specify here in terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster,",
"loc.startswith('loc:@'): loc_name = loc[5:] if loc_name not in reachable_vars: #",
"output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph and freezing.') graph_def =",
"graph nodes will be retained. Otherwise, the specified device #",
"set() def _GetOpName(tensor_or_op_name): \"\"\"Returns the op name of the given",
"graph collections are bad, however this seems to be the",
"reachable_vars: # Skip nodes that cannot be reached from the",
"False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param)",
"tensor name instead of # Tensor instance. named_fetches = {k:",
"= inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto =",
"yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks variable as constants",
"def _SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets cluster params. Args: cluster_params: Model().cluster",
"bad, however this seems to be the # easiest way",
"initializes a graph and freezes it. Args: graph: tf.Graph. output_op_names:",
"output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning",
"freeze. Useful for early testing of downstream tools without having",
"or freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if",
"issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task)",
"runtime can choose. tf.logging.info('Clearing device placement for: %s', device_options.device) for",
"in # the graph to sanity check (versus relying on",
"Returns: Array of tf op names that should be preserved",
"tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name =",
"preserve_extra_ops=None): \"\"\"Gets output op names from an inference graph. Args:",
"task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name)",
"for exporting an InferenceGraph proto from model params.\"\"\" import collections",
"to serialize initialization ops for the device. For TPUs, #",
"def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with",
"device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags prior",
"cluster_params.job = 'decoder' cluster_params.add_summary = False cluster_params.do_eval = True Update(cluster_params.controller)",
"return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16",
"Resulting tf.GraphDef. \"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return",
"syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t =",
"as: python3 # Copyright 2018 The TensorFlow Authors. All Rights",
"False cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input)",
"exporting an InferenceGraph proto from model params.\"\"\" import collections import",
"# skipped because it's only used for saving. saver_def =",
"TPUs work properly. Args: model_cfg: a Params instance as returned",
"# constants. device = '' tpu_const_scope = ConstGuaranteeScope() with cluster,",
"specified device # will be cleared, so that the runtime",
"output ops. Returns: Resulting tf.GraphDef. \"\"\" sess = tf.Session(graph=graph, config=py_utils.SessionConfig())",
"global variable init op to the graph. # Tables can",
"Returns: Resulting tf.GraphDef. \"\"\" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint)",
"google.protobuf import text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains options",
"[op.name for op in graph.get_operations()]) # Collection defs if not",
"subgraphs are preserved. Otherwise, all subgraphs are included. preserve_colocation_nodes: a",
"the accelerator used for serving. freeze_checkpoint: The checkpoint to load.",
"global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val =",
"the runtime can choose. tf.logging.info('Clearing device placement for: %s', device_options.device)",
"Some inference # graphs put tensors and others put ops",
"under the License is distributed on an \"AS IS\" BASIS,",
"the graph to just the parts we need. # To",
"= v return vars_dict def IsTpu(device_options): return device_options.device == 'tpu'",
"given node name.\"\"\" # Tensor names have format <op_name>:<output_index>. Some",
"var_scope.custom_getter old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op:",
"in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for",
"the closure of output ops in the returned array. preserve_saver_restore_nodes:",
"operating in eager mode.') # Freezing. if freeze_defaults or freeze_checkpoint:",
"(IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): # Do not specify devices",
"named_fetches = {k: v.name for k, v in fetches.items() if",
"freezes the model if given. freeze_defaults: Default initializes the graph",
"list of subgraph names. If not None or empty, export",
"contains ops: %r', [op.name for op in graph.get_operations()]) # Collection",
"skipped because it's only used for saving. saver_def = inference_graph_proto.saver_def",
"to get this assets registered from # TextFileInitializer. assets_collection =",
"the graph to sanity check (versus relying on the text",
"inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not in subgraphs: tf.logging.info('Skip subgraph",
"in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in node.node_def.attr: for",
"= '/job:localhost' p.replicas = 1 p.tpus_per_replica = 1 if IsTpu(device_options)",
"= 0 p.devices_per_split = 1 cluster_params.mode = 'sync' cluster_params.job =",
"== 'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value')",
"# a description of the syntax of each. if re.search(r':[0-9]+$',",
"avoid tripping initialization checks. # dtype_override: Whether to override the",
"= tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device old_val =",
"this case, we need to make sure the node #",
"bool, default to False. Preserves nodes for restoring according to",
"output_op_names) def _FreezeDefaults(graph, output_op_names): \"\"\"Default initializes a graph and freezes",
"we need to specify here in terms of # cluster",
"= tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names)",
"proto. Raises: ValueError: if the model does not support the",
"graph in ASCII to this path. subgraph_filter: A string or",
"= constant_value.string_val[0] # Add a table init op and global",
"feeds) where each fetches/feeds is a NestedMap. Returns: Equivalent InferenceGraph.",
"_MakeVariableDictionary(variables): \"\"\"Returns a dictionary with name -> tf.Variable() mapping.\"\"\" vars_dict",
"Rewrite fetches and feeds to map to their tensor name",
"v in fetches.items() if v is not None} named_feeds =",
"not support the listed subgraphs. \"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel) if",
"node in graph.get_operations(): if preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name)",
"subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections",
"inference on the device. # device: Device to infer on.",
"def NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing variable with-in scope.\"\"\" global _CONST_GUARANTEE",
"this scope as constants.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter",
"= True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def",
"configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model. model_cfg.random_seed = random_seed",
"through 'ON_DEVICE' option, or treated as # constants with AS_CONSTANTS.",
"dictionary with name -> tf.Variable() mapping.\"\"\" vars_dict = {} for",
"if asset.op.type == 'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value",
"device_options): \"\"\"Sets cluster params. Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions.",
"that the runtime can choose automatically. # var_options: Options on",
"constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name",
"def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output op",
"var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None def _GetVarName(v): return",
"assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override)",
"bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec)",
"handling variables. For TPUs, variables can be # either placed",
"if the model does not support the listed subgraphs. \"\"\"",
"tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom(",
"ASCII to this path. subgraph_filter: A string or a list",
"inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False,",
"output_op_names = set() def _GetOpName(tensor_or_op_name): \"\"\"Returns the op name of",
"ANY KIND, either express or implied. # See the License",
"if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter]",
"fix in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars =",
"the License. # You may obtain a copy of the",
"def ConstGuaranteeScope(): \"\"\"Treats all variables under this scope as constants.\"\"\"",
"Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto,",
"# See the License for the specific language governing permissions",
"model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The task to generate an",
"assets_collection: if asset.op.type == 'Const' and asset.op.get_attr( 'dtype') == tf.dtypes.string:",
"tf.logging.info('Skip subgraph %s.', subgraph_name) continue # Sometimes feeds aren't connected",
"graph: tf.Graph. saver: The tf.Saver to use for restoration. checkpoint:",
"to make sure the node # inference/embedding_lookup/Read/ReadVariableOp is not pruned.",
"checkpoint, output_op_names): \"\"\"Freezes a graph from a checkpoint. Args: graph:",
"( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec",
"all subgraphs are included. preserve_colocation_nodes: a Python bool, default to",
"of extra op names to preserve as long as they",
"to True. Preserves nodes colocating with the closure of output",
"export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports a InferenceGraph proto with piecewise",
"collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE",
"be preserved in the graph. \"\"\" output_op_names = set() def",
"= graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if",
"tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: \"\"\"Class for exporting inference graphs.\"\"\"",
"the graph and freeze. Useful for early testing of downstream",
"tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg,",
"for saving. saver_def = inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]:",
"If not None or empty, export only this list of",
"tf.GraphDef. \"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess,",
"choose. tf.logging.info('Clearing device placement for: %s', device_options.device) for node in",
"# Collection defs if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for",
"automatically. # var_options: Options on handling variables. For TPUs, variables",
"= 'sync' cluster_params.job = 'decoder' cluster_params.add_summary = False cluster_params.do_eval =",
"Equivalent InferenceGraph. \"\"\" # Build the output inference graph. inference_graph_proto",
"meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs ' 'since operating in",
"task = mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema",
"preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class'",
"variable as constants for compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):",
"the device. # device: Device to infer on. # retain_device_placement:",
"graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return op.name for",
"of the syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name.",
"Params instance as returned by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name:",
"*args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const(",
"false) to make weight-sharing and multi-core inference on TPUs work",
"ops from these subgraphs are preserved. Otherwise, all subgraphs are",
"_GetOpName(tensor_or_op_name): \"\"\"Returns the op name of the given node name.\"\"\"",
"a checkpoint. Args: graph: tf.Graph. saver: The tf.Saver to use",
"still do the lookup in # the graph to sanity",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"task.encoder.packed_input = False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input",
"of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access",
"the output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors",
"writing, software # distributed under the License is distributed on",
"or a list of subgraph names. If not None or",
"AS_CONSTANTS. # gen_init_op: Whether to serialize initialization ops for the",
"base_model from lingvo.core import bfloat16_variables from lingvo.core import inference_graph_pb2 from",
"Returns: Equivalent InferenceGraph. \"\"\" # Build the output inference graph.",
"continue output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): \"\"\"Tests whether param_name",
"tf.Graph. saver: The tf.Saver to use for restoration. checkpoint: The",
"graph from a checkpoint. Args: graph: tf.Graph. saver: The tf.Saver",
"subgraphs: an optional list of subgraph names. If provided, only",
"graph: tf.Graph. output_op_names: Names of output ops. Returns: Resulting tf.GraphDef.",
"in the graph. Ignoring.' % op_name) if not preserve_colocation_nodes and",
"last. tf.tables_initializer(name='init_all_tables') finally: # Reset TPU-related flags after model instantiation.",
"v is not None} named_feeds = {k: v.name for k,",
"old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter)",
"is not None} named_feeds = {k: v.name for k, v",
"use for restoration. checkpoint: The checkpoint to restore. output_op_names: Names",
"Collection defs if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key",
"for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor",
"instance as returned by model_registry.GetParams(modelname, 'Test') or model_params.Model(). model_task_name: The",
"re import lingvo.compat as tf from lingvo.core import base_model from",
"string or a list of subgraph names. If not None",
"for function in graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def)",
"or freeze_defaults: # Replace variables with tensors using tf.identity in",
"var_scope.set_caching_device(old_caching_device) # Marks variable as constants for compilation def MaybeGuaranteeConstGetter(getter,",
"finally: # Reset TPU-related flags after model instantiation. FLAGS.enable_asserts =",
"var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True yield _CONST_GUARANTEE =",
"to make weight-sharing and multi-core inference on TPUs work properly.",
"= task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name,",
"if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if",
"export_path: If not None, write the inference graph in ASCII",
"variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if",
"subgraph_name not in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue #",
"<op_name>:<output_index>. Some inference # graphs put tensors and others put",
"avoid the graph referencing types of DT_RESOURCE. def AddIdentityToTheta(layer): layer._private_theta",
"param_name): \"\"\"Tests whether param_name is contained in param_obj.\"\"\" if not",
"to load. Loads and freezes the model if given. freeze_defaults:",
"ConstGuaranteeScope(): \"\"\"Treats all variables under this scope as constants.\"\"\" global",
"op names that should be preserved in the graph. \"\"\"",
"== tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path:",
"# will be cleared, so that the runtime can choose",
"nodes that cannot be reached from the pruned graph. continue",
"scope.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val",
"FLAGS.xla_device = 'tpu' try: mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name)",
"is not None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return",
"output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to output ops: %r',",
"Fixes the random seed in the exported inference graph. disable_packed_input:",
"which case this should be # turned off to avoid",
"freeze_checkpoint or freeze_defaults: # Replace variables with tensors using tf.identity",
"not specify devices for variables if we are marking them",
"random seed in the exported inference graph. disable_packed_input: Disable packed",
"= GetOutputOpNames(graph, inference_graph_proto) # Prune the graph to just the",
"are preserved. Otherwise, all subgraphs are included. preserve_colocation_nodes: a Python",
"None def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns a dictionary",
"inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items(): fetches =",
"import base_model from lingvo.core import bfloat16_variables from lingvo.core import inference_graph_pb2",
"gen_init_op: Whether to serialize initialization ops for the device. For",
"v return vars_dict def IsTpu(device_options): return device_options.device == 'tpu' def",
"off to avoid tripping initialization checks. # dtype_override: Whether to",
"model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains",
"s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } # } # } # #",
"devices for variables if we are marking them as #",
"_CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs), name=name +",
"FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu' try:",
"else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for line in model_cfg.ToText().split('\\n'):",
"in subgraphs_dict.items(): fetches = tensors[0] feeds = tensors[1] # Rewrite",
"{k: v.name for k, v in fetches.items() if v is",
"preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used with device",
"p.tpus_per_replica = 1 if IsTpu(device_options) else 0 p.gpus_per_replica = 0",
"in the model. Options supported are None or tf.bfloat16. InferenceDeviceOptions",
"fetches.items() if v is not None} named_feeds = {k: v.name",
"{ # key: \"_class\" # value { # list {",
"model_cfg.is_inference = True if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder')",
"eager mode.') # Freezing. if freeze_defaults or freeze_checkpoint: output_op_names =",
"collections are bad, however this seems to be the #",
"gauranteeing variable with-in scope.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device",
"graph and freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names",
"with AS_CONSTANTS. # gen_init_op: Whether to serialize initialization ops for",
"== 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options):",
"device_options: Device options for the accelerator used for serving. freeze_checkpoint:",
"py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)",
"return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets cluster params.",
"inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs ' 'since operating",
"the specified device # will be cleared, so that the",
"= FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False",
"loc_name = loc[5:] if loc_name not in reachable_vars: # Skip",
"preserve_colocation_nodes and '_class' in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc",
"\"\"\"Sets cluster params. Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. \"\"\"",
"format <op_name>:<output_index>. Some inference # graphs put tensors and others",
"graph_def.node: node.ClearField('device') for function in graph_def.library.function: for node_def in function.node_def:",
"# anyways to avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values()) +",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"if freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False,",
"with-in scope.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device",
"lingvo.core import bfloat16_variables from lingvo.core import inference_graph_pb2 from lingvo.core import",
"explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo):",
"graphs.\"\"\" @classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None,",
"GetOutputOpNames(graph, inference_graph_proto) # Prune the graph to just the parts",
"checkpoint: The checkpoint to restore. output_op_names: Names of output ops.",
"We also need to preserve any nodes that are used",
"with tensors using tf.identity in theta before # freezing to",
"to not prune out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency')",
"Resulting tf.GraphDef. \"\"\" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options):",
"activations and # weights in the model. Options supported are",
"\"\"\" sess = tf.Session(graph=graph, config=py_utils.SessionConfig()) saver.restore(sess, checkpoint) return tf.graph_util.convert_variables_to_constants( sess,",
"list of inference subgraphs. random_seed: Fixes the random seed in",
"name instead of # Tensor instance. named_fetches = {k: v.name",
"TPU-related flags after model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device =",
"in fetches.items() if v is not None} named_feeds = {k:",
"device_options.var_options == 'AS_CONSTANTS'): # Do not specify devices for variables",
"subgraphs. random_seed: Fixes the random seed in the exported inference",
"= var_scope.custom_getter old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda",
"infer on. # retain_device_placement: If true, the specified device in",
"'AS_CONSTANTS'): # Do not specify devices for variables if we",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"can be declared anywhere in the graph, so this op",
"freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False)",
"the device so that the runtime can choose. tf.logging.info('Clearing device",
"freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph,",
"node # inference/embedding_lookup/Read/ReadVariableOp is not pruned. # # TODO(zhifengc): It's",
"tensors in subgraphs_dict.items(): fetches = tensors[0] feeds = tensors[1] #",
"given. freeze_defaults: Default initializes the graph and freeze. Useful for",
"and global variable init op to the graph. # Tables",
"== param_name: return True return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint,",
"a node may have this attr: # attr { #",
"node name.\"\"\" # Tensor names have format <op_name>:<output_index>. Some inference",
"# Tensor instance. named_fetches = {k: v.name for k, v",
"getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats all variables under",
"models. device_options: Device options for the accelerator used for serving.",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"device through 'ON_DEVICE' option, or treated as # constants with",
"properly. Args: model_cfg: a Params instance as returned by model_registry.GetParams(modelname,",
"import inference_graph_pb2 from lingvo.core import py_utils import six from google.protobuf",
"their tensor name instead of # Tensor instance. named_fetches =",
"of subgraph names. If provided, only output ops from these",
"breaks, TensorFlow will raise a ValueError with # a description",
"= inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0] # Add",
"inference graphs.\"\"\" @classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False,",
"Rights Reserved. # # Licensed under the Apache License, Version",
"named_feeds = {k: v.name for k, v in feeds.items() if",
"output op names from an inference graph. Args: graph: The",
"specific language governing permissions and # limitations under the License.",
"map to their tensor name instead of # Tensor instance.",
"the random seed in the exported inference graph. disable_packed_input: Disable",
"saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore",
"%r', [op.name for op in graph.get_operations()]) # Collection defs if",
"# turned off to avoid tripping initialization checks. # dtype_override:",
"freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports a InferenceGraph proto",
"sorted(list(output_op_names)) # We also need to preserve any nodes that",
"Yes, graph collections are bad, however this seems to be",
"subgraph_name, tensors in subgraphs_dict.items(): fetches = tensors[0] feeds = tensors[1]",
"having a checkpoint. export_path: If not None, write the inference",
"connected to any outputs but keep them in the graph",
"disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input",
"Names of output ops. Returns: Resulting tf.GraphDef. \"\"\" with tf.Session(graph=graph,",
"or treated as # constants with AS_CONSTANTS. # gen_init_op: Whether",
"py_utils import six from google.protobuf import text_format FLAGS = tf.flags.FLAGS",
"graph # anyways to avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values())",
"# device: Device to infer on. # retain_device_placement: If true,",
"not pruned. # # TODO(zhifengc): It's possible that it's better",
"# you may not use this file except in compliance",
"True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope():",
"= mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else",
"_CONST_GUARANTEE = False yield _CONST_GUARANTEE = old_val var_scope.set_caching_device(old_caching_device) # Marks",
"graph. graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate()",
"theta before # freezing to avoid the graph referencing types",
"the lookup in # the graph to sanity check (versus",
"if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts = False # TODO(laurenzo): Work out how",
"sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: \"\"\"Class for",
"# easiest way to get this assets registered from #",
"are bad, however this seems to be the # easiest",
"ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter or",
"specified device in the generated # inference graph nodes will",
"param_obj.IterParams(): if k == param_name: return True return False def",
"param_obj: return for k, _ in param_obj.IterParams(): if k ==",
"runtime can choose automatically. # var_options: Options on handling variables.",
"can be initialized globally once, in which case this should",
"for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s",
"to avoid the graph referencing types of DT_RESOURCE. def AddIdentityToTheta(layer):",
"graph to just the parts we need. # To support",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"do the lookup in # the graph to sanity check",
"for restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an optional list of",
"[saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not in",
"if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else:",
"constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0]",
"the graph. graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster =",
"exported inference graph. disable_packed_input: Disable packed input for inference writing",
"inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name, tensors in subgraphs_dict.items():",
"the # easiest way to get this assets registered from",
"from an inference graph. Args: graph: The tf graph. inference_graph_proto:",
"'/job:localhost' p.replicas = 1 p.tpus_per_replica = 1 if IsTpu(device_options) else",
"limitations under the License. # ============================================================================== \"\"\"Utility for exporting an",
"def IsTpu(device_options): return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override",
"Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name)",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"the graph. \"\"\" output_op_names = set() def _GetOpName(tensor_or_op_name): \"\"\"Returns the",
"after model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph",
"'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel):",
"inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError:",
"not preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names)) # We also",
"bfloat16_variables from lingvo.core import inference_graph_pb2 from lingvo.core import py_utils import",
"feeds = tensors[1] # Rewrite fetches and feeds to map",
"parts we need. # To support restoring, we have to",
"else: tf.logging.warning('Not exporting collection defs ' 'since operating in eager",
"model_params.Model(). model_task_name: The task to generate an inference graph for.",
"if export_path: with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto",
"var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device old_val",
"tf.logging.debug('%s', line) # Instantiate the graph. graph = tf.Graph() with",
"if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs), name=name",
"act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related flags prior to instantiating",
"initializes the graph and freeze. Useful for early testing of",
"array. preserve_saver_restore_nodes: a Python bool, default to False. Preserves nodes",
"= layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph()",
"params `p`.\"\"\" p.name = '/job:localhost' p.replicas = 1 p.tpus_per_replica =",
"if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = (",
"yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield",
"old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None def _GetVarName(v):",
"The tf.Saver to use for restoration. checkpoint: The checkpoint to",
"freeze_defaults: # Replace variables with tensors using tf.identity in theta",
"tensors and others put ops in the feeds/fetches (depends #",
"pruned. # # TODO(zhifengc): It's possible that it's better to",
"any nodes that are used for colocation. # E.g., a",
"that cannot be reached from the pruned graph. continue output_op_names.add(node.name)",
"= tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(),",
"import text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains options to",
"cannot be reached from the pruned graph. continue output_op_names.add(node.name) return",
"= False # TODO(laurenzo): Work out how much we need",
"serving. freeze_checkpoint: The checkpoint to load. Loads and freezes the",
"graph from checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint,",
"in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w') as",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options)",
"'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow",
"attr { # key: \"_class\" # value { # list",
"# TODO(zhifengc): It's possible that it's better to fix in",
"getter(name, *args, **kwargs), name=name + '/GuaranteeConst') else: return getter(name, *args,",
"= model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if",
"contains options to configure inference on the device. # device:",
"in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) # Instantiate the graph. graph =",
"can choose automatically. # var_options: Options on handling variables. For",
"v in variables: vars_dict[_GetVarName(v)] = v return vars_dict def IsTpu(device_options):",
"out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options)",
"have this attr: # attr { # key: \"_class\" #",
"from lingvo.core import inference_graph_pb2 from lingvo.core import py_utils import six",
"{ # list { # s: \"loc:@inference/embedding_lookup/Read/ReadVariableOp\" # } #",
"an inference graph for. Should be None for single-task models.",
"this attr: # attr { # key: \"_class\" # value",
"isinstance(subgraphs_proto, dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items():",
"just the parts we need. # To support restoring, we",
"'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return",
"'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param",
"of tf op names that should be preserved in the",
"None or tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options',",
"# pylint: disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto =",
"asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found",
"differentiate here. We still do the lookup in # the",
"For TPUs, # servers can be initialized globally once, in",
"a graph and freezes it. Args: graph: tf.Graph. output_op_names: Names",
"inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if isinstance(subgraphs_proto, dict): subgraphs_proto",
"for k, v in fetches.items() if v is not None}",
"as f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options):",
"and not isinstance(subgraph_filter, (tuple, list)): subgraph_filter = [subgraph_filter] # Disable",
"the graph # anyways to avoid errors. for tensor_or_op_name in",
"If true, the specified device in the generated # inference",
"**kwargs), name=name + '/GuaranteeConst') else: return getter(name, *args, **kwargs) @contextlib.contextmanager",
"= tensors[1] # Rewrite fetches and feeds to map to",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"Authors. All Rights Reserved. # # Licensed under the Apache",
"case, we need to make sure the node # inference/embedding_lookup/Read/ReadVariableOp",
"inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune the graph to",
"var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True):",
"asset in assets_collection: if asset.op.type == 'Const' and asset.op.get_attr( 'dtype')",
"bool, default to True. Preserves nodes colocating with the closure",
"# freezing to avoid the graph referencing types of DT_RESOURCE.",
"if not subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes,",
"FLAGS.enable_asserts old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device",
"old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops: %r', [op.name for",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"# Disable assertions unless user explicitly enables it. if FLAGS['enable_asserts'].using_default_value:",
"either express or implied. # See the License for the",
"return op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and",
"# limitations under the License. # ============================================================================== \"\"\"Utility for exporting",
"model_cfg.random_seed = random_seed model_cfg.is_inference = True if disable_packed_input: def _DisablePackedInput(task):",
"to use for restoration. checkpoint: The checkpoint to restore. output_op_names:",
"config. device_options: InferenceDeviceOptions. \"\"\" def Update(p): \"\"\"Update cluster params `p`.\"\"\"",
"dtype to use for activations and # weights in the",
"reached from the pruned graph. continue output_op_names.add(node.name) return sorted(list(output_op_names)) def",
"output_op_names) class InferenceGraphExporter: \"\"\"Class for exporting inference graphs.\"\"\" @classmethod def",
"to infer on. # retain_device_placement: If true, the specified device",
"None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def",
"to this path. subgraph_filter: A string or a list of",
"not None or empty, export only this list of inference",
"loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:] if",
"def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None,",
"issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can",
"as they present in the graph. Returns: Array of tf",
"be the # easiest way to get this assets registered",
"param_name: return True return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names):",
"empty, export only this list of inference subgraphs. random_seed: Fixes",
"freezing to avoid the graph referencing types of DT_RESOURCE. def",
"= ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) # Hard-code TPU-related",
"output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not in the graph. Ignoring.'",
"False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): \"\"\"Freezes a graph from",
"'packed_input')): task.encoder.packed_input = False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')):",
"on how it is used). We differentiate here. We still",
"preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names)) # We also need",
"@classmethod def _SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets cluster params. Args: cluster_params:",
"op_name) if not preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names)) #",
"with piecewise subgraphs. Sets FLAGS.enable_asserts to False unless user explicitly",
"as constants.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter",
"according to inference_graph_proto.saver_def. preserve_extra_ops: an optional list of extra op",
"are included. preserve_colocation_nodes: a Python bool, default to True. Preserves",
"name='init_all_variables') if IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or",
"the runtime can choose automatically. # var_options: Options on handling",
"checkpoint. export_path: If not None, write the inference graph in",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not in",
"checkpoint. Args: graph: tf.Graph. saver: The tf.Saver to use for",
"them in the graph # anyways to avoid errors. for",
"node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w') as f: f.write(text_format.MessageToString(inference_graph_proto))",
"cluster_params.do_eval = True Update(cluster_params.controller) Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod",
"output_op_names.add(node.name) return sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): \"\"\"Tests whether param_name is",
"# Skip nodes that cannot be reached from the pruned",
"these subgraphs are preserved. Otherwise, all subgraphs are included. preserve_colocation_nodes:",
"Model().cluster config. device_options: InferenceDeviceOptions. \"\"\" def Update(p): \"\"\"Update cluster params",
"to preserve any nodes that are used for colocation. #",
"writing purposes. Returns: InferenceGraph proto. Raises: ValueError: if the model",
"on the text manipulation). # If this logic ever breaks,",
"the model does not support the listed subgraphs. \"\"\" assert",
"the feeds/fetches (depends # on how it is used). We",
"the node # inference/embedding_lookup/Read/ReadVariableOp is not pruned. # # TODO(zhifengc):",
"[node.name for node in graph_def.node] for node in graph.get_operations(): if",
"params:', model_cfg.name) for line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) # Instantiate",
"True if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task, 'encoder') and _ParamExists(task.encoder,",
"ops: %r', [op.name for op in graph.get_operations()]) # Collection defs",
"to inference_graph_proto.saver_def. preserve_extra_ops: an optional list of extra op names",
"specify devices for variables if we are marking them as",
"inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output",
"tensors[0] feeds = tensors[1] # Rewrite fetches and feeds to",
"inference graph for. Should be None for single-task models. device_options:",
"op = graph.get_operation_by_name(tensor_or_op_name) return op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items():",
"the graph. Returns: Array of tf op names that should",
"preserved. saver_def.save_tensor_name is # skipped because it's only used for",
"tpu_const_scope = _DummyScope() if (IsTpu(device_options) and device_options.var_options == 'AS_CONSTANTS'): #",
"collections import contextlib import re import lingvo.compat as tf from",
"colocation. # E.g., a node may have this attr: #",
"aren't connected to any outputs but keep them in the",
"nodes will be retained. Otherwise, the specified device # will",
"if bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec",
"KeyError: tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name)",
"Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None),",
"to fix in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars",
"= variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options)",
"and others put ops in the feeds/fetches (depends # on",
"dtype_override: Whether to override the dtype to use for activations",
"@contextlib.contextmanager def _DummyScope(): yield None def _GetVarName(v): return v.name[:-len(':0')] def",
"and not preserve_extra_ops: return sorted(list(output_op_names)) # We also need to",
"inference_graph_proto, preserve_colocation_nodes=False, preserve_saver_restore_nodes=False) if cls._DeviceSupportsFreezing(device_options): raise ValueError('freeze_checkpoint cannot be used",
"old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE = False",
"p.devices_per_split = 1 cluster_params.mode = 'sync' cluster_params.job = 'decoder' cluster_params.add_summary",
"inference subgraphs. random_seed: Fixes the random seed in the exported",
"(tuple, list)): subgraph_filter = [subgraph_filter] # Disable assertions unless user",
"it to True. Note: Enable FLAGS.pin_vars_to_cpu (default false) to make",
"used for colocation. # E.g., a node may have this",
"= False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input =",
"raise ValueError('freeze_checkpoint cannot be used with device ' + device_options.device)",
"use this file except in compliance with the License. #",
"proto with piecewise subgraphs. Sets FLAGS.enable_asserts to False unless user",
"Useful for early testing of downstream tools without having a",
"= tf.graph_util.extract_sub_graph(graph.as_graph_def(), list(output_op_names)) reachable_vars = [node.name for node in graph_def.node]",
"\"\"\"Default initializes a graph and freezes it. Args: graph: tf.Graph.",
"%r', output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: #",
"in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name =",
"key in meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else:",
"model_cfg.cluster.Instantiate() device = cluster.GetPlacer() tpu_const_scope = _DummyScope() if (IsTpu(device_options) and",
"and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for",
"Add a table init op and global variable init op",
"in variables: vars_dict[_GetVarName(v)] = v return vars_dict def IsTpu(device_options): return",
"override the dtype to use for activations and # weights",
"The TensorFlow Authors. All Rights Reserved. # # Licensed under",
"and asset.op.get_attr( 'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val:",
"inference on TPUs work properly. Args: model_cfg: a Params instance",
"subgraphs_dict.items(): fetches = tensors[0] feeds = tensors[1] # Rewrite fetches",
"model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables()) if not",
"attr: # attr { # key: \"_class\" # value {",
"constants for compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE",
"a description of the syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name):",
"not None, write the inference graph in ASCII to this",
"FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops: %r',",
"def Update(p): \"\"\"Update cluster params `p`.\"\"\" p.name = '/job:localhost' p.replicas",
"\"\"\"Class for exporting inference graphs.\"\"\" @classmethod def Export(cls, model_cfg, model_task_name=None,",
"We still do the lookup in # the graph to",
"InferenceDeviceOptions. \"\"\" def Update(p): \"\"\"Update cluster params `p`.\"\"\" p.name =",
"# # TODO(zhifengc): It's possible that it's better to fix",
"output_op_names): \"\"\"Freezes a graph from a checkpoint. Args: graph: tf.Graph.",
"export only this list of inference subgraphs. random_seed: Fixes the",
"Dict of (fetches, feeds) where each fetches/feeds is a NestedMap.",
"p.name = '/job:localhost' p.replicas = 1 p.tpus_per_replica = 1 if",
"= True yield _CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def",
"in ASCII to this path. subgraph_filter: A string or a",
"inference graph. Args: graph: The tf graph. inference_graph_proto: an InferenceGraph",
"**kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name,",
"in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue # Sometimes feeds",
"ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override =",
"in compliance with the License. # You may obtain a",
"inference/embedding_lookup/Read/ReadVariableOp is not pruned. # # TODO(zhifengc): It's possible that",
"of # Tensor instance. named_fetches = {k: v.name for k,",
"the model. model_cfg.random_seed = random_seed model_cfg.is_inference = True if disable_packed_input:",
"software # distributed under the License is distributed on an",
"feeds aren't connected to any outputs but keep them in",
"disable=protected-access layer.children.Transform(AddIdentityToTheta) AddIdentityToTheta(task) inference_graph_proto = inference_graph_pb2.InferenceGraph() subgraphs_proto = task.Inference() if",
"in graph.get_operations()]) # Collection defs if not tf.executing_eagerly(): meta_graph =",
"subgraph %s.', subgraph_name) continue # Sometimes feeds aren't connected to",
"Build the output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for subgraph_name,",
"v.name for k, v in fetches.items() if v is not",
"used for saving. saver_def = inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name,",
"flags after model instantiation. FLAGS.enable_asserts = old_enable_asserts FLAGS.xla_device = old_xla_device",
"def _GetOpName(tensor_or_op_name): \"\"\"Returns the op name of the given node",
"be # either placed on device through 'ON_DEVICE' option, or",
"it's better to fix in # tf.graph_util.extract_sub_graph. graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),",
"\"\"\"Disallow const gauranteeing variable with-in scope.\"\"\" global _CONST_GUARANTEE var_scope =",
"variables with tensors using tf.identity in theta before # freezing",
"the model. Options supported are None or tf.bfloat16. InferenceDeviceOptions =",
"(fetches, feeds) where each fetches/feeds is a NestedMap. Returns: Equivalent",
"used for serving. freeze_checkpoint: The checkpoint to load. Loads and",
"choose automatically. # var_options: Options on handling variables. For TPUs,",
"if v is not None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches)",
"from google.protobuf import text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions contains",
"if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError( 'device_options{dtype_override,fprop_dtype_override) can not both",
"Instantiate the graph. graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster",
"old_xla_device = FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device =",
"'ON_DEVICE' option, or treated as # constants with AS_CONSTANTS. #",
"mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore = ( _MakeVariableDictionary(tf.global_variables())",
"_CONST_GUARANTEE = old_val var_scope.set_custom_getter(old_custom_getter) var_scope.set_caching_device(old_caching_device) @contextlib.contextmanager def _DummyScope(): yield None",
"for: %s', device_options.device) for node in graph_def.node: node.ClearField('device') for function",
"in feeds.items() if v is not None} # Export as",
"= tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if asset.op.type ==",
"' 'since operating in eager mode.') # Freezing. if freeze_defaults",
"tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override: py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) #",
"device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None,",
"or empty, export only this list of inference subgraphs. random_seed:",
"= ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore saver",
"# # In this case, we need to make sure",
"servers can be initialized globally once, in which case this",
"asset_file_def.tensor_info.name = asset.name asset_file_def.filename = constant_value.string_val[0] # Add a table",
"py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations( device_options) if act_bfloat16_override:",
"graph. disable_packed_input: Disable packed input for inference writing purposes. Returns:",
"v is not None} # Export as subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds)",
"inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None):",
"= ( _MakeVariableDictionary(tf.global_variables()) if not mdl.ema else mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override:",
"also need to preserve any nodes that are used for",
"for. Should be None for single-task models. device_options: Device options",
"present in the graph. Returns: Array of tf op names",
"in eager mode.') # Freezing. if freeze_defaults or freeze_checkpoint: output_op_names",
"the graph. # Tables can be declared anywhere in the",
"with the License. # You may obtain a copy of",
"only output ops from these subgraphs are preserved. Otherwise, all",
"subgraphs. \"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise",
"flags prior to instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device =",
"%s params:', model_cfg.name) for line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) #",
"of subgraphs/feeds/fetches to InferenceGraph. Args: subgraphs_dict: Dict of (fetches, feeds)",
"lingvo.core import base_model from lingvo.core import bfloat16_variables from lingvo.core import",
"= ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items(): if not subgraph_filter",
"preserve_extra_ops: return sorted(list(output_op_names)) # We also need to preserve any",
"from # TextFileInitializer. assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in",
"'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ]) _CONST_GUARANTEE = None @contextlib.contextmanager def",
"= '' tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override",
"closure of output ops in the returned array. preserve_saver_restore_nodes: a",
"random_seed model_cfg.is_inference = True if disable_packed_input: def _DisablePackedInput(task): if (_ParamExists(task,",
"Should be None for single-task models. device_options: Device options for",
"instead of # Tensor instance. named_fetches = {k: v.name for",
"express or implied. # See the License for the specific",
"the parts we need. # To support restoring, we have",
"except in compliance with the License. # You may obtain",
"nodes that are used for colocation. # E.g., a node",
"the syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t",
"var_options: Options on handling variables. For TPUs, variables can be",
"in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except KeyError: tf.logging.info('Op/tensor %s not",
"# TODO(laurenzo): Work out how much we need to specify",
"checks. # dtype_override: Whether to override the dtype to use",
"return True return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): \"\"\"Freezes",
"for colocation. # E.g., a node may have this attr:",
"included. preserve_colocation_nodes: a Python bool, default to True. Preserves nodes",
"possible that it's better to fix in # tf.graph_util.extract_sub_graph. graph_def",
"_CONST_GUARANTEE var_scope = tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE",
"text manipulation). # If this logic ever breaks, TensorFlow will",
"tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path: %s',",
"key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting collection defs ' 'since",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def()",
"subgraphs_proto.subgraphs.items(): if not subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) #",
"this logic ever breaks, TensorFlow will raise a ValueError with",
"load. Loads and freezes the model if given. freeze_defaults: Default",
"License. # ============================================================================== \"\"\"Utility for exporting an InferenceGraph proto from",
"in reachable_vars: # Skip nodes that cannot be reached from",
"function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with tf.io.gfile.GFile(export_path, 'w') as f:",
"Freezing. if freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames( graph, inference_graph_proto,",
"'device_options{dtype_override,fprop_dtype_override) can not both be' 'set.') if subgraph_filter and not",
"generated # inference graph nodes will be retained. Otherwise, the",
"return v.name[:-len(':0')] def _MakeVariableDictionary(variables): \"\"\"Returns a dictionary with name ->",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"op.name for subgraph_name, subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name",
"True. Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing and",
"_FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph and",
"output_op_names) graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: # Clear",
"output ops in the returned array. preserve_saver_restore_nodes: a Python bool,",
"option, or treated as # constants with AS_CONSTANTS. # gen_init_op:",
"tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE",
"v in feeds.items() if v is not None} # Export",
"_ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _,",
"to their tensor name instead of # Tensor instance. named_fetches",
"sanity check (versus relying on the text manipulation). # If",
"file_path: %s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename",
"# Reset TPU-related flags after model instantiation. FLAGS.enable_asserts = old_enable_asserts",
"this seems to be the # easiest way to get",
"device placement for: %s', device_options.device) for node in graph_def.node: node.ClearField('device')",
"and freeze. Useful for early testing of downstream tools without",
"random_seed: Fixes the random seed in the exported inference graph.",
"\"\"\" output_op_names = set() def _GetOpName(tensor_or_op_name): \"\"\"Returns the op name",
"= inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name)) except",
"== 'AS_CONSTANTS'): # Do not specify devices for variables if",
"tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def: tf.logging.info('copying collection",
"a InferenceGraph proto with piecewise subgraphs. Sets FLAGS.enable_asserts to False",
"# Configure the model. model_cfg.random_seed = random_seed model_cfg.is_inference = True",
"if loc_name not in reachable_vars: # Skip nodes that cannot",
"for early testing of downstream tools without having a checkpoint.",
"if not tf.executing_eagerly(): meta_graph = tf.train.export_meta_graph(graph=graph) for key in meta_graph.collection_def:",
"= old_enable_asserts FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops: %r', [op.name",
"and freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names =",
"subgraph. inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches) inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds) return inference_graph_proto def GetOutputOpNames(graph, inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True,",
"exporting inference graphs.\"\"\" @classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='',",
"# Tables can be declared anywhere in the graph, so",
"on the device. # device: Device to infer on. #",
"graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if export_path: with",
"variables under this scope as constants.\"\"\" global _CONST_GUARANTEE var_scope =",
"tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of subgraphs/feeds/fetches to InferenceGraph. Args:",
"subgraph_filter: A string or a list of subgraph names. If",
"way to get this assets registered from # TextFileInitializer. assets_collection",
"configure inference on the device. # device: Device to infer",
"device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint) graph_def",
"inference graph. disable_packed_input: Disable packed input for inference writing purposes.",
"tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device = cluster.GetPlacer()",
"tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset in assets_collection: if asset.op.type == 'Const'",
"vars_dict def IsTpu(device_options): return device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return",
"of output ops. Returns: Resulting tf.GraphDef. \"\"\" sess = tf.Session(graph=graph,",
"model_cfg.task_params.IterParams(): _DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for line",
"and multi-core inference on TPUs work properly. Args: model_cfg: a",
"preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output op names from an inference",
"saver_def = inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try: output_op_names.add(_GetOpName(op_name))",
"assertions unless user explicitly enables it. if FLAGS['enable_asserts'].using_default_value: FLAGS.enable_asserts =",
"node.node_def.attr: for loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if",
"tensors[1] # Rewrite fetches and feeds to map to their",
"for exporting inference graphs.\"\"\" @classmethod def Export(cls, model_cfg, model_task_name=None, device_options=InferenceDeviceOptions(",
"Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing and multi-core inference",
"device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16) py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16) act_bfloat16_override = ShouldForceBfloat16ForActivations(",
"= tf.get_variable_scope() old_caching_device = var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_caching_device(None) _CONST_GUARANTEE",
"from lingvo.core import py_utils import six from google.protobuf import text_format",
"with name -> tf.Variable() mapping.\"\"\" vars_dict = {} for v",
"= tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed) cluster = model_cfg.cluster.Instantiate() device =",
"False # TODO(laurenzo): Work out how much we need to",
"graph_def.node] for node in graph.get_operations(): if preserve_extra_ops and node.name in",
"retain_device_placement: If true, the specified device in the generated #",
"t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else: op = graph.get_operation_by_name(tensor_or_op_name) return",
"loc[5:] if loc_name not in reachable_vars: # Skip nodes that",
"to False unless user explicitly sets it to True. Note:",
"= _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph",
"def _DummyScope(): yield None def _GetVarName(v): return v.name[:-len(':0')] def _MakeVariableDictionary(variables):",
"that should be preserved in the graph. \"\"\" output_op_names =",
"dict): subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto) for name, subgraph in subgraphs_proto.subgraphs.items(): if",
"we are marking them as # constants. device = ''",
"% op_name) if not preserve_colocation_nodes and not preserve_extra_ops: return sorted(list(output_op_names))",
"is a NestedMap. Returns: Equivalent InferenceGraph. \"\"\" # Build the",
"sorted(list(output_op_names)) def _ParamExists(param_obj, param_name): \"\"\"Tests whether param_name is contained in",
"variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer( tf.global_variables(), name='init_all_variables') if IsTpu(device_options) and",
"disable_packed_input=True): \"\"\"Exports a InferenceGraph proto with piecewise subgraphs. Sets FLAGS.enable_asserts",
"inference graph nodes will be retained. Otherwise, the specified device",
"mapping.\"\"\" vars_dict = {} for v in variables: vars_dict[_GetVarName(v)] =",
"and subgraph_name not in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name) continue",
"in param_obj.IterParams(): if k == param_name: return True return False",
"config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter:",
"# either placed on device through 'ON_DEVICE' option, or treated",
"def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override",
"node.name in preserve_extra_ops: output_op_names.add(node.name) elif preserve_colocation_nodes and '_class' in node.node_def.attr:",
"Loads and freezes the model if given. freeze_defaults: Default initializes",
"check (versus relying on the text manipulation). # If this",
"preserve_colocation_nodes: a Python bool, default to True. Preserves nodes colocating",
"None @contextlib.contextmanager def NoConstGuaranteeScope(): \"\"\"Disallow const gauranteeing variable with-in scope.\"\"\"",
"_ in param_obj.IterParams(): if k == param_name: return True return",
"_FreezeDefaults(graph, output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune",
"params.\"\"\" import collections import contextlib import re import lingvo.compat as",
"return tf.guarantee_const( getter(name, *args, **kwargs), name=name + '/GuaranteeConst') else: return",
"device. For TPUs, # servers can be initialized globally once,",
"names from an inference graph. Args: graph: The tf graph.",
"= asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset file_path: %s', constant_value.string_val[0]) asset_file_def",
"_CONST_GUARANTEE if _CONST_GUARANTEE: with tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs),",
"initialization checks. # dtype_override: Whether to override the dtype to",
"proto. subgraphs: an optional list of subgraph names. If provided,",
"k, v in feeds.items() if v is not None} #",
"output_op_names) else: inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def()) output_op_names = GetOutputOpNames(graph, inference_graph_proto) # Prune the",
"a NestedMap. Returns: Equivalent InferenceGraph. \"\"\" # Build the output",
"prior to instantiating model. old_enable_asserts = FLAGS.enable_asserts old_xla_device = FLAGS.xla_device",
"inference_graph_proto, subgraphs=None, preserve_colocation_nodes=True, preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output op names from",
"f: f.write(text_format.MessageToString(inference_graph_proto)) return inference_graph_proto @classmethod def _SetClusterParams(cls, cluster_params, device_options): \"\"\"Sets",
"\"\"\" with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess: sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(),",
"so that the runtime can choose. tf.logging.info('Clearing device placement for:",
"return tf.graph_util.convert_variables_to_constants( sess, graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): \"\"\"Default initializes",
"tripping initialization checks. # dtype_override: Whether to override the dtype",
"node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:]",
"2018 The TensorFlow Authors. All Rights Reserved. # # Licensed",
"False. Preserves nodes for restoring according to inference_graph_proto.saver_def. preserve_extra_ops: an",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"node.ClearField('device') for function in graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device')",
"subgraph in inference_graph_proto.subgraphs.items(): if subgraphs and subgraph_name not in subgraphs:",
"In this case, we need to make sure the node",
"if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name",
"= 1 p.tpus_per_replica = 1 if IsTpu(device_options) else 0 p.gpus_per_replica",
"If this logic ever breaks, TensorFlow will raise a ValueError",
"try: mdl = model_cfg.Instantiate() task = mdl.GetTask(model_task_name) variables_to_restore = (",
"Tensor instance. named_fetches = {k: v.name for k, v in",
"# on how it is used). We differentiate here. We",
"_CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter = var_scope.custom_getter old_caching_device = var_scope.caching_device",
"names. If not None or empty, export only this list",
"preserve_extra_ops: an optional list of extra op names to preserve",
"true, the specified device in the generated # inference graph",
"elif freeze_defaults: tf.logging.info('Default initializing graph and freezing.') graph_def = _FreezeDefaults(graph,",
"%s', constant_value.string_val[0]) asset_file_def = inference_graph_proto.asset_file_def.add() asset_file_def.tensor_info.name = asset.name asset_file_def.filename =",
"(versus relying on the text manipulation). # If this logic",
"# Build the output inference graph. inference_graph_proto = inference_graph_pb2.InferenceGraph() for",
"Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def _DeviceSupportsFreezing(cls, device_options): return IsTpu(device_options)",
"param_name is contained in param_obj.\"\"\" if not param_obj: return for",
"sess.run(graph.get_operation_by_name('init_all_variables')) return tf.graph_util.convert_variables_to_constants(sess, graph.as_graph_def(), output_op_names) class InferenceGraphExporter: \"\"\"Class for exporting",
"node in graph_def.node: node.ClearField('device') for function in graph_def.library.function: for node_def",
"options to configure inference on the device. # device: Device",
"for loc in node.node_def.attr['_class'].list.s: loc = six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'):",
"# E.g., a node may have this attr: # attr",
"in graph.get_operations(): if preserve_extra_ops and node.name in preserve_extra_ops: output_op_names.add(node.name) elif",
"tf graph. inference_graph_proto: an InferenceGraph proto. subgraphs: an optional list",
"optional list of subgraph names. If provided, only output ops",
"Version 2.0 (the \"License\"); # you may not use this",
"mdl.ema.variables_to_restore(mdl.variables_for_ema)) if bfloat16_override: saver_var_spec = ( bfloat16_variables .get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else:",
"ShouldForceBfloat16ForActivations(device_options): return device_options.fprop_dtype_override == tf.bfloat16 def ConvertSubgraphDictToProto(subgraphs_dict): \"\"\"Converts dict of",
"output_op_names): \"\"\"Default initializes a graph and freezes it. Args: graph:",
"Args: cluster_params: Model().cluster config. device_options: InferenceDeviceOptions. \"\"\" def Update(p): \"\"\"Update",
"InferenceGraph proto. subgraphs: an optional list of subgraph names. If",
"tf.Saver to use for restoration. checkpoint: The checkpoint to restore.",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved. #",
"var_scope.caching_device old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE =",
"Skip nodes that cannot be reached from the pruned graph.",
"= False if issubclass(model_cfg.cls, base_model.MultiTaskModel): for _, task_param in model_cfg.task_params.IterParams():",
"should be # turned off to avoid tripping initialization checks.",
"TPUs, variables can be # either placed on device through",
"' + device_options.device) if freeze_checkpoint: tf.logging.info('Freezing graph from checkpoint: %s',",
"in subgraphs_proto.subgraphs.items(): if not subgraph_filter or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph)",
"have to not prune out the restore node. output_op_names.append('init_all_tables') output_op_names.append('init_all_variables')",
"scope as constants.\"\"\" global _CONST_GUARANTEE var_scope = tf.get_variable_scope() old_custom_getter =",
"by applicable law or agreed to in writing, software #",
"old_val = _CONST_GUARANTEE var_scope.set_custom_getter(MaybeGuaranteeConstGetter) var_scope.set_caching_device(lambda op: op.device) _CONST_GUARANTEE = True",
"saver, freeze_checkpoint, output_op_names) elif freeze_defaults: tf.logging.info('Default initializing graph and freezing.')",
"# Instantiate the graph. graph = tf.Graph() with graph.as_default(): tf.random.set_seed(random_seed)",
"if subgraphs and subgraph_name not in subgraphs: tf.logging.info('Skip subgraph %s.',",
"Marks variable as constants for compilation def MaybeGuaranteeConstGetter(getter, name, *args,",
"`p`.\"\"\" p.name = '/job:localhost' p.replicas = 1 p.tpus_per_replica = 1",
"False if (_ParamExists(task, 'decoder') and _ParamExists(task.decoder, 'packed_input')): task.decoder.packed_input = False",
"using tf.identity in theta before # freezing to avoid the",
"initialization ops for the device. For TPUs, # servers can",
"tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults: # Replace variables with",
"_DisablePackedInput(task_param) else: _DisablePackedInput(model_cfg.task) tf.logging.debug('Model %s params:', model_cfg.name) for line in",
"# Lint as: python3 # Copyright 2018 The TensorFlow Authors.",
"output_op_names.append('init_all_tables') output_op_names.append('init_all_variables') output_op_names.append('save/control_dependency') output_op_names.append('save/restore_all') if IsTpu(device_options) and device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def",
"device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None,",
"# cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) # Configure the model. model_cfg.random_seed",
"device. # device: Device to infer on. # retain_device_placement: If",
"avoid errors. for tensor_or_op_name in (list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if",
"options for the accelerator used for serving. freeze_checkpoint: The checkpoint",
"graph. Returns: Array of tf op names that should be",
"graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names) if not device_options.retain_device_placement: # Clear the",
"registered from # TextFileInitializer. assets_collection = tf.compat.v1.get_collection( tf.compat.v1.GraphKeys.ASSET_FILEPATHS) for asset",
"device_options) # Configure the model. model_cfg.random_seed = random_seed model_cfg.is_inference =",
"an optional list of subgraph names. If provided, only output",
"be declared anywhere in the graph, so this op has",
"function in graph_def.library.function: for node_def in function.node_def: node_def.ClearField('device') inference_graph_proto.graph_def.CopyFrom(graph_def) if",
"Default initializes the graph and freeze. Useful for early testing",
"Update(cluster_params.worker) Update(cluster_params.ps) Update(cluster_params.evaler) Update(cluster_params.decoder) Update(cluster_params.input) @classmethod def _DeviceSupportsFreezing(cls, device_options): return",
"both be' 'set.') if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)):",
"import py_utils import six from google.protobuf import text_format FLAGS =",
"applicable law or agreed to in writing, software # distributed",
"tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg, tf.bfloat16)",
"nodes colocating with the closure of output ops in the",
"return False def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names): \"\"\"Freezes a graph",
"will raise a ValueError with # a description of the",
"+ list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes for restoring",
"} # # In this case, we need to make",
"checkpoint to restore. output_op_names: Names of output ops. Returns: Resulting",
"subgraphs and subgraph_name not in subgraphs: tf.logging.info('Skip subgraph %s.', subgraph_name)",
"six from google.protobuf import text_format FLAGS = tf.flags.FLAGS # InferenceDeviceOptions",
"device so that the runtime can choose. tf.logging.info('Clearing device placement",
"# You may obtain a copy of the License at",
"the exported inference graph. disable_packed_input: Disable packed input for inference",
"for line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) # Instantiate the graph.",
"saving. saver_def = inference_graph_proto.saver_def for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]: try:",
"= FLAGS.xla_device if IsTpu(device_options): FLAGS.enable_asserts = False FLAGS.xla_device = 'tpu'",
"\"\"\"Treats all variables under this scope as constants.\"\"\" global _CONST_GUARANTEE",
"output_op_names: Names of output ops. Returns: Resulting tf.GraphDef. \"\"\" with",
"compilation def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs): global _CONST_GUARANTEE if _CONST_GUARANTEE:",
"subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are bad, however this",
"here in terms of # cluster configuration. cls._SetClusterParams(model_cfg.cluster, device_options) #",
"of the given node name.\"\"\" # Tensor names have format",
"packed input for inference writing purposes. Returns: InferenceGraph proto. Raises:",
"meta_graph.collection_def: tf.logging.info('copying collection %s', key) inference_graph_proto.collection_def[key].CopyFrom( meta_graph.collection_def[key]) else: tf.logging.warning('Not exporting",
"fetches/feeds is a NestedMap. Returns: Equivalent InferenceGraph. \"\"\" # Build",
"to configure inference on the device. # device: Device to",
"InferenceGraph. Args: subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds",
"description of the syntax of each. if re.search(r':[0-9]+$', tensor_or_op_name): #",
"if v is not None} named_feeds = {k: v.name for",
"can be # either placed on device through 'ON_DEVICE' option,",
"'/GuaranteeConst') else: return getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats",
"be # turned off to avoid tripping initialization checks. #",
"of each. if re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name)",
"'dtype') == tf.dtypes.string: constant_value = asset.op.get_attr('value') if constant_value.string_val: tf.logging.info('Found asset",
"others put ops in the feeds/fetches (depends # on how",
"six.ensure_text(loc, 'utf-8') if loc.startswith('loc:@'): loc_name = loc[5:] if loc_name not",
"fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports a InferenceGraph",
"the dtype to use for activations and # weights in",
"if k == param_name: return True return False def _FreezeGraphFromCheckpoint(graph,",
".get_saver_spec_for_variables_with_bf16_overrides( variables_to_restore)) else: saver_var_spec = variables_to_restore saver = tf.train.Saver(saver_var_spec) tf.variables_initializer(",
"graphs put tensors and others put ops in the feeds/fetches",
"%s not in the graph. Ignoring.' % op_name) if not",
"as tf from lingvo.core import base_model from lingvo.core import bfloat16_variables",
"tpu_const_scope = ConstGuaranteeScope() with cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations(",
"to avoid tripping initialization checks. # dtype_override: Whether to override",
"for k, v in feeds.items() if v is not None}",
"# Yes, graph collections are bad, however this seems to",
"on. # retain_device_placement: If true, the specified device in the",
"model_cfg, model_task_name=None, device_options=InferenceDeviceOptions( device='', retain_device_placement=False, var_options=None, gen_init_op=True, dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None,",
"Tensor names have format <op_name>:<output_index>. Some inference # graphs put",
"(list(subgraph.feeds.values()) + list(subgraph.fetches.values())): output_op_names.add(_GetOpName(tensor_or_op_name)) if preserve_saver_restore_nodes: # Only nodes for",
"of output ops in the returned array. preserve_saver_restore_nodes: a Python",
"listed subgraphs. \"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override:",
"asset_file_def.filename = constant_value.string_val[0] # Add a table init op and",
"cluster, tf.device(device), tpu_const_scope: bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations( device_options) if bfloat16_override: py_utils.UpdateDtype(model_cfg,",
"in the generated # inference graph nodes will be retained.",
"If provided, only output ops from these subgraphs are preserved.",
"graph.as_graph_def(), output_op_names) def _FreezeDefaults(graph, output_op_names): \"\"\"Default initializes a graph and",
"logic ever breaks, TensorFlow will raise a ValueError with #",
"IsTpu(device_options) and device_options.gen_init_op: tf.group(tf.tpu.initialize_system(), name='tpu_init_op') if freeze_checkpoint or freeze_defaults: #",
"FLAGS.xla_device = old_xla_device tf.logging.info('Graph contains ops: %r', [op.name for op",
"need. # To support restoring, we have to not prune",
"variables if we are marking them as # constants. device",
"from checkpoint: %s', freeze_checkpoint) graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint, output_op_names)",
"device: Device to infer on. # retain_device_placement: If true, the",
"tf.control_dependencies(None): return tf.guarantee_const( getter(name, *args, **kwargs), name=name + '/GuaranteeConst') else:",
"preserve_saver_restore_nodes=False, preserve_extra_ops=None): \"\"\"Gets output op names from an inference graph.",
"it's only used for saving. saver_def = inference_graph_proto.saver_def for op_name",
"\"License\"); # you may not use this file except in",
"Do not specify devices for variables if we are marking",
"graph. \"\"\" output_op_names = set() def _GetOpName(tensor_or_op_name): \"\"\"Returns the op",
"= collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op', 'dtype_override', 'fprop_dtype_override' ])",
"mode.') # Freezing. if freeze_defaults or freeze_checkpoint: output_op_names = GetOutputOpNames(",
"or name in subgraph_filter: inference_graph_proto.subgraphs[name].CopyFrom(subgraph) # Yes, graph collections are",
"# Clear the device so that the runtime can choose.",
"optional list of extra op names to preserve as long",
"\"\"\" assert issubclass(model_cfg.cls, base_model.BaseModel) if device_options.dtype_override and device_options.fprop_dtype_override: raise ValueError(",
"tf.logging.info('Graph contains ops: %r', [op.name for op in graph.get_operations()]) #",
"dtype_override=None, fprop_dtype_override=None), freeze_checkpoint=None, freeze_defaults=False, export_path=None, subgraph_filter=None, random_seed=None, disable_packed_input=True): \"\"\"Exports a",
"else: return getter(name, *args, **kwargs) @contextlib.contextmanager def ConstGuaranteeScope(): \"\"\"Treats all",
"(_ParamExists(task, 'encoder') and _ParamExists(task.encoder, 'packed_input')): task.encoder.packed_input = False if (_ParamExists(task,",
"not in reachable_vars: # Skip nodes that cannot be reached",
"the graph. Ignoring.' % op_name) if not preserve_colocation_nodes and not",
"we have to not prune out the restore node. output_op_names.append('init_all_tables')",
"variables: vars_dict[_GetVarName(v)] = v return vars_dict def IsTpu(device_options): return device_options.device",
"device_options.device == 'tpu' def ShouldForceBfloat16ForWeightsAndActivations(device_options): return device_options.dtype_override == tf.bfloat16 def",
"globally once, in which case this should be # turned",
"restoration. checkpoint: The checkpoint to restore. output_op_names: Names of output",
"Returns: InferenceGraph proto. Raises: ValueError: if the model does not",
"or model_params.Model(). model_task_name: The task to generate an inference graph",
"lingvo.core import inference_graph_pb2 from lingvo.core import py_utils import six from",
"elif preserve_colocation_nodes and '_class' in node.node_def.attr: for loc in node.node_def.attr['_class'].list.s:",
"model_cfg.name) for line in model_cfg.ToText().split('\\n'): tf.logging.debug('%s', line) # Instantiate the",
"device_options.gen_init_op: output_op_names.append('tpu_init_op') graph_def = graph.as_graph_def() tf.logging.info('Pruning graph to output ops:",
"tf.logging.info('Default initializing graph and freezing.') graph_def = _FreezeDefaults(graph, output_op_names) else:",
"re.search(r':[0-9]+$', tensor_or_op_name): # Tensor-name. t = graph.get_tensor_by_name(tensor_or_op_name) return t.op.name else:",
"unless user explicitly sets it to True. Note: Enable FLAGS.pin_vars_to_cpu",
"or tf.bfloat16. InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [ 'device', 'retain_device_placement', 'var_options', 'gen_init_op',"
] |
[
"import tqdm def main(): # Fetch File Paths file_paths =",
"result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to return",
"File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to",
"#! usr/env/bin python import glob import numpy as np import",
"main(): # Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort",
"table name from and year from the given file path",
"# Slice the dataset df = df.iloc[start:end,0:2] # Reset the",
"Sort them according to year file_paths.sort(key = lambda x: int(x[-8:-4]))",
"the interested datapoints start = df.index[df[t_name] == \"Total\"][0] + 1",
"pd from tqdm import tqdm def main(): # Fetch File",
"data df_res = get_place_crime_count(file_paths[0]) # Iterate over the rest of",
"rest of the files for p in tqdm(file_paths[1:]): df_temp =",
"= pd.merge(df_res, df_temp, on = \"Place\", how = \"left\") #",
"# Return the value return df except: # If there",
"the given file path t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year =",
"the index for the reduced dataframe df.reset_index(drop = True, inplace",
"a result dataframe to store the data df_res = get_place_crime_count(file_paths[0])",
"t_year}, inplace = True) # Return the value return df",
"df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to return \"\"\" # Extracting",
"pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if __name__ == '__main__': main()",
"the rest of the files for p in tqdm(file_paths[1:]): df_temp",
"# Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them",
"glob import numpy as np import pandas as pd from",
"\"Multiple locations\"][0] # Slice the dataset df = df.iloc[start:end,0:2] #",
"get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on = \"Place\", how =",
"empty dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place',",
"= df.index[df[t_name] == \"Multiple locations\"][0] # Slice the dataset df",
"= df.iloc[start:end,0:2] # Reset the index for the reduced dataframe",
"import pandas as pd from tqdm import tqdm def main():",
"the dataset df = df.iloc[start:end,0:2] # Reset the index for",
"from tqdm import tqdm def main(): # Fetch File Paths",
"df_temp = get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on = \"Place\",",
"year from the given file path t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\"))",
"tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on =",
"import glob import numpy as np import pandas as pd",
"python import glob import numpy as np import pandas as",
"dataframe to store the data df_res = get_place_crime_count(file_paths[0]) # Iterate",
"store the data df_res = get_place_crime_count(file_paths[0]) # Iterate over the",
"return an empty dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan, index=",
"get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to return \"\"\" # Extracting the table",
"return \"\"\" # Extracting the table name from and year",
"Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to year",
"index for the reduced dataframe df.reset_index(drop = True, inplace =",
"= True, inplace = True) # Rename the columns df.rename(columns={t_name:",
"\"Place\", \"Unnamed: 1\": t_year}, inplace = True) # Return the",
"== \"Multiple locations\"][0] # Slice the dataset df = df.iloc[start:end,0:2]",
"no such data return an empty dataframe i_list = list(range(0,47))",
"datapoints start = df.index[df[t_name] == \"Total\"][0] + 1 end =",
"there is no such data return an empty dataframe i_list",
"path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name)",
"df = pd.read_excel(path,sheet_name=t_name) # Get the start and end indices",
"the reduced dataframe df.reset_index(drop = True, inplace = True) #",
"Extracting the table name from and year from the given",
"i_list = list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if",
"name from and year from the given file path t_name",
"tqdm def main(): # Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls')",
"# Create a result dataframe to store the data df_res",
"such data return an empty dataframe i_list = list(range(0,47)) return",
"from the given file path t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year",
"spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get the start and end",
"over the rest of the files for p in tqdm(file_paths[1:]):",
"try: # Read the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) #",
"result dataframe to store the data df_res = get_place_crime_count(file_paths[0]) #",
"and year from the given file path t_name = \"",
"= pd.read_excel(path,sheet_name=t_name) # Get the start and end indices of",
"inplace = True) # Return the value return df except:",
"files for p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res =",
"True) # Rename the columns df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year},",
"to store the data df_res = get_place_crime_count(file_paths[0]) # Iterate over",
"pd.read_excel(path,sheet_name=t_name) # Get the start and end indices of the",
"\"\"\" # Extracting the table name from and year from",
"dataset df = df.iloc[start:end,0:2] # Reset the index for the",
"= lambda x: int(x[-8:-4])) # Create a result dataframe to",
"glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to year file_paths.sort(key = lambda",
"of the interested datapoints start = df.index[df[t_name] == \"Total\"][0] +",
"to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to return \"\"\"",
"Slice the dataset df = df.iloc[start:end,0:2] # Reset the index",
"given file path t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")]",
"an empty dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan, index= i_list,",
"list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if __name__ ==",
"+ 1 end = df.index[df[t_name] == \"Multiple locations\"][0] # Slice",
"# Iterate over the rest of the files for p",
"def main(): # Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') #",
"<filename>src/preprocessing/annual_hc_by_crime_loc.py #! usr/env/bin python import glob import numpy as np",
"# Reset the index for the reduced dataframe df.reset_index(drop =",
"file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to year file_paths.sort(key",
"df = df.iloc[start:end,0:2] # Reset the index for the reduced",
"# If there is no such data return an empty",
"== \"Total\"][0] + 1 end = df.index[df[t_name] == \"Multiple locations\"][0]",
"def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to return \"\"\" # Extracting the",
"True) # Return the value return df except: # If",
"= glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according to year file_paths.sort(key =",
"\"Unnamed: 1\": t_year}, inplace = True) # Return the value",
"= df.index[df[t_name] == \"Total\"][0] + 1 end = df.index[df[t_name] ==",
"of the files for p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p)",
"Fetch File Paths file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls') # Sort them according",
"file_paths.sort(key = lambda x: int(x[-8:-4])) # Create a result dataframe",
"tqdm import tqdm def main(): # Fetch File Paths file_paths",
"in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on",
"1 end = df.index[df[t_name] == \"Multiple locations\"][0] # Slice the",
"inplace = True) # Rename the columns df.rename(columns={t_name: \"Place\", \"Unnamed:",
"year file_paths.sort(key = lambda x: int(x[-8:-4])) # Create a result",
"p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res = pd.merge(df_res, df_temp,",
"to year file_paths.sort(key = lambda x: int(x[-8:-4])) # Create a",
"as pd from tqdm import tqdm def main(): # Fetch",
"dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year])",
"# Read the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get",
"and end indices of the interested datapoints start = df.index[df[t_name]",
"the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to",
"return df except: # If there is no such data",
"df except: # If there is no such data return",
"# Sort them according to year file_paths.sort(key = lambda x:",
"data return an empty dataframe i_list = list(range(0,47)) return pd.DataFrame(np.nan,",
"# Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\"",
"np import pandas as pd from tqdm import tqdm def",
"how = \"left\") # Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False)",
"on = \"Place\", how = \"left\") # Save the result",
"t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read",
"# Extracting the table name from and year from the",
"= True) # Return the value return df except: #",
"= \"left\") # Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def",
"\"\"\" Function to return \"\"\" # Extracting the table name",
"end = df.index[df[t_name] == \"Multiple locations\"][0] # Slice the dataset",
"according to year file_paths.sort(key = lambda x: int(x[-8:-4])) # Create",
"locations\"][0] # Slice the dataset df = df.iloc[start:end,0:2] # Reset",
"Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function",
"# Get the start and end indices of the interested",
"them according to year file_paths.sort(key = lambda x: int(x[-8:-4])) #",
"= get_place_crime_count(p) df_res = pd.merge(df_res, df_temp, on = \"Place\", how",
"from and year from the given file path t_name =",
"Get the start and end indices of the interested datapoints",
"the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get the start",
"Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get the start and",
"pd.merge(df_res, df_temp, on = \"Place\", how = \"left\") # Save",
"Create a result dataframe to store the data df_res =",
"end indices of the interested datapoints start = df.index[df[t_name] ==",
"df.index[df[t_name] == \"Multiple locations\"][0] # Slice the dataset df =",
"\" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read the Excel",
"= True) # Rename the columns df.rename(columns={t_name: \"Place\", \"Unnamed: 1\":",
"= list(range(0,47)) return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if __name__",
"Read the Excel spreadsheet df = pd.read_excel(path,sheet_name=t_name) # Get the",
"Function to return \"\"\" # Extracting the table name from",
"disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame: \"\"\" Function to return \"\"\" #",
"= \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read the",
"the start and end indices of the interested datapoints start",
"True, inplace = True) # Rename the columns df.rename(columns={t_name: \"Place\",",
"as np import pandas as pd from tqdm import tqdm",
"\"Place\", how = \"left\") # Save the result to disk",
"Return the value return df except: # If there is",
"for p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res = pd.merge(df_res,",
"reduced dataframe df.reset_index(drop = True, inplace = True) # Rename",
"usr/env/bin python import glob import numpy as np import pandas",
"df.iloc[start:end,0:2] # Reset the index for the reduced dataframe df.reset_index(drop",
"for the reduced dataframe df.reset_index(drop = True, inplace = True)",
"= path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read the Excel spreadsheet df =",
"dataframe df.reset_index(drop = True, inplace = True) # Rename the",
"df_temp, on = \"Place\", how = \"left\") # Save the",
"to return \"\"\" # Extracting the table name from and",
"the files for p in tqdm(file_paths[1:]): df_temp = get_place_crime_count(p) df_res",
"If there is no such data return an empty dataframe",
"t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read the Excel spreadsheet df",
"df.index[df[t_name] == \"Total\"][0] + 1 end = df.index[df[t_name] == \"Multiple",
"Reset the index for the reduced dataframe df.reset_index(drop = True,",
"numpy as np import pandas as pd from tqdm import",
"Iterate over the rest of the files for p in",
"df.reset_index(drop = True, inplace = True) # Rename the columns",
"the value return df except: # If there is no",
"is no such data return an empty dataframe i_list =",
"Rename the columns df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year}, inplace =",
"the data df_res = get_place_crime_count(file_paths[0]) # Iterate over the rest",
"start = df.index[df[t_name] == \"Total\"][0] + 1 end = df.index[df[t_name]",
"int(x[-8:-4])) # Create a result dataframe to store the data",
"\"left\") # Save the result to disk df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv',index=False) def get_place_crime_count(path:str)->pd.DataFrame:",
"indices of the interested datapoints start = df.index[df[t_name] == \"Total\"][0]",
"import numpy as np import pandas as pd from tqdm",
"value return df except: # If there is no such",
"\".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try: # Read the Excel spreadsheet",
"pandas as pd from tqdm import tqdm def main(): #",
"df_res = get_place_crime_count(file_paths[0]) # Iterate over the rest of the",
"start and end indices of the interested datapoints start =",
"get_place_crime_count(file_paths[0]) # Iterate over the rest of the files for",
"# Rename the columns df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year}, inplace",
"= get_place_crime_count(file_paths[0]) # Iterate over the rest of the files",
"df_res = pd.merge(df_res, df_temp, on = \"Place\", how = \"left\")",
"except: # If there is no such data return an",
"df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year}, inplace = True) # Return",
"interested datapoints start = df.index[df[t_name] == \"Total\"][0] + 1 end",
"= \"Place\", how = \"left\") # Save the result to",
"path t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try: #",
"x: int(x[-8:-4])) # Create a result dataframe to store the",
"\"Total\"][0] + 1 end = df.index[df[t_name] == \"Multiple locations\"][0] #",
"the table name from and year from the given file",
"return pd.DataFrame(np.nan, index= i_list, columns=['Place', t_year]) if __name__ == '__main__':",
"file path t_name = \" \".join(path[path.index(\"Table\"):path.index(\"_Incidents\")].split(\"_\")) t_year = path[path.index(\".xls\")-4:path.index(\".xls\")] try:",
"1\": t_year}, inplace = True) # Return the value return",
"lambda x: int(x[-8:-4])) # Create a result dataframe to store",
"columns df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year}, inplace = True) #",
"the columns df.rename(columns={t_name: \"Place\", \"Unnamed: 1\": t_year}, inplace = True)"
] |
[
"assert_almost_equal import torch from allennlp.common import Params from allennlp.data import",
"BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])",
"= np.array([[2, 0], [3, 0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)",
"Params from allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from",
"0, 0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def",
"= BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0], [3, 0], [4,",
"params = Params({\"projection_dim\": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor =",
"super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def",
"[4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor =",
"allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab",
"import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self):",
"params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2,",
"self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params = Params({}) embedder",
"assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params = Params({\"projection_dim\": 50}) embedder =",
"disable=no-self-use,invalid-name import numpy as np from numpy.testing import assert_almost_equal import",
"0, 0], [1, 0, 0, 1, 0, 0], [0, 0,",
"torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor = np.array([[1, 0, 1, 0,",
"0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor",
"for x in [\"1\", \"2\", \"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output",
"params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x in [\"1\", \"2\", \"3\"]])",
"self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self):",
"as np from numpy.testing import assert_almost_equal import torch from allennlp.common",
"0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])",
"0], [0, 0, 0, 0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float()",
"embedder(inputs) numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1,",
"pylint: disable=no-self-use,invalid-name import numpy as np from numpy.testing import assert_almost_equal",
"numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]]) inputs =",
"manual_output.data.numpy()) def test_projects_properly(self): params = Params({\"projection_dim\": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab,",
"embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0], [3, 0],",
"from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp()",
"Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0], [3,",
"[0, 0, 0, 0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(),",
"= np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0,",
"import torch from allennlp.common import Params from allennlp.data import Vocabulary",
"0], [3, 0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output =",
"in [\"1\", \"2\", \"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs)",
"from allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing",
"= torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor = np.array([[1, 0, 1,",
"params=params) numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]]) inputs",
"Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params = Params({})",
"torch from allennlp.common import Params from allennlp.data import Vocabulary from",
"TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\")",
"self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)",
"= BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x in [\"1\",",
"allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import",
"= Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params =",
"embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x in",
"allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def",
"numpy as np from numpy.testing import assert_almost_equal import torch from",
"np.array([[2, 0], [3, 0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output",
"[1, 0, 0, 1, 0, 0], [0, 0, 0, 0,",
"BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x in [\"1\", \"2\",",
"numpy.testing import assert_almost_equal import torch from allennlp.common import Params from",
"= Params({\"projection_dim\": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x)",
"Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class",
"numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0,",
"from allennlp.common import Params from allennlp.data import Vocabulary from allennlp.modules.token_embedders",
"1, 0, 0, 0], [1, 0, 0, 1, 0, 0],",
"import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab =",
"0, 1, 0, 0, 0], [1, 0, 0, 1, 0,",
"inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor = np.array([[1, 0,",
"test_forward_calculates_bow_properly(self): params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor =",
"1, 0, 0], [0, 0, 0, 0, 2, 0]]) manual_output",
"embedder_output = embedder(inputs) numpy_tensor = np.array([[1, 0, 1, 0, 0,",
"Params({\"projection_dim\": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for",
"= torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params = Params({\"projection_dim\": 50})",
"AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary()",
"[\"1\", \"2\", \"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) assert",
"from numpy.testing import assert_almost_equal import torch from allennlp.common import Params",
"BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder,",
"class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase): def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\")",
"np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1,",
"import assert_almost_equal import torch from allennlp.common import Params from allennlp.data",
"from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):",
"= Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([[2, 0],",
"allennlp.common import Params from allennlp.data import Vocabulary from allennlp.modules.token_embedders import",
"np.array([self.vocab.get_token_index(x) for x in [\"1\", \"2\", \"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)",
"0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params =",
"0, 0], [0, 0, 0, 0, 2, 0]]) manual_output =",
"0], [1, 0, 0, 1, 0, 0], [0, 0, 0,",
"import Params from allennlp.data import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder",
"0, 0, 0], [1, 0, 0, 1, 0, 0], [0,",
"\"2\", \"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) assert embedder_output.shape[1]",
"x in [\"1\", \"2\", \"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output =",
"inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) assert embedder_output.shape[1] == 50",
"0, 0, 1, 0, 0], [0, 0, 0, 0, 2,",
"def test_forward_calculates_bow_properly(self): params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor",
"test_projects_properly(self): params = Params({\"projection_dim\": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor",
"def setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\")",
"0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self):",
"manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params = Params({\"projection_dim\":",
"import numpy as np from numpy.testing import assert_almost_equal import torch",
"\"3\"]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) assert embedder_output.shape[1] ==",
"50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params) numpy_tensor = np.array([self.vocab.get_token_index(x) for x",
"2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params",
"0, 0, 0, 2, 0]]) manual_output = torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())",
"# pylint: disable=no-self-use,invalid-name import numpy as np from numpy.testing import",
"= embedder(inputs) numpy_tensor = np.array([[1, 0, 1, 0, 0, 0],",
"numpy_tensor = np.array([self.vocab.get_token_index(x) for x in [\"1\", \"2\", \"3\"]]) inputs",
"np from numpy.testing import assert_almost_equal import torch from allennlp.common import",
"[3, 0], [4, 4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs)",
"4]]) inputs = torch.from_numpy(numpy_tensor).unsqueeze(1) embedder_output = embedder(inputs) numpy_tensor = np.array([[1,",
"= np.array([self.vocab.get_token_index(x) for x in [\"1\", \"2\", \"3\"]]) inputs =",
"self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params = Params({}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab,",
"self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params = Params({}) embedder =",
"torch.from_numpy(numpy_tensor).float() assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy()) def test_projects_properly(self): params = Params({\"projection_dim\": 50}) embedder",
"setUp(self): super(TestBagOfWordCountsTokenEmbedder, self).setUp() self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\")",
"import Vocabulary from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder from allennlp.common.testing import AllenNlpTestCase",
"self.vocab = Vocabulary() self.vocab.add_token_to_namespace(\"1\") self.vocab.add_token_to_namespace(\"2\") self.vocab.add_token_to_namespace(\"3\") self.vocab.add_token_to_namespace(\"4\") def test_forward_calculates_bow_properly(self): params",
"def test_projects_properly(self): params = Params({\"projection_dim\": 50}) embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)"
] |
[
"voxel_odir = os.path.join(curr_path, \"voxel_score\") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file =",
"os.path.join(curr_path, \"img_feat\") if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\"",
"\"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to config file\", ) parser.add_argument(\"--input\", help=\"A",
"checkpoint: %s\" % (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"])",
"detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import setup_logger from fvcore.common.file_io import",
"print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh remesh =",
"c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) # Save predicted",
") return parser def resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh) #",
"% (im_name)) torch.save(voxel_scores, voxel_file) # Save image features imgfeat_odir =",
"imagenet_preprocess from shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch import build_model from",
"logger.info(\"Arguments: \" + str(args)) cfg = setup_cfgs(args) # load checkpoing",
"\"cube_mesh\") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" %",
"logging import multiprocessing as mp import logging import os from",
"Path from pytorch3d.io import save_obj from shapenet.config.config import get_shapenet_cfg from",
"os.path.join(curr_path, \"cube_mesh\") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\"",
"from shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint",
"return only the highest scoring detection\" ) parser.add_argument( \"opts\", help=\"Modify",
"detectron2.evaluation import inference_context import torch import torch.distributed as dist import",
"path to an input main folder\") # parser.add_argument(\"--output\", help=\"A directory",
"import Image import trimesh import pyvista as pv import pyacvd",
"parser.add_argument( \"opts\", help=\"Modify model config options using the command-line\", default=None,",
"inference_context(model): img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img) # Save",
"setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg def",
"os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, \"%s.obj\" % (im_name)) verts, faces =",
"to config file\", ) parser.add_argument(\"--input\", help=\"A path to an input",
"os from detectron2.evaluation import inference_context import torch import torch.distributed as",
"import collect_env_info from detectron2.utils.logger import setup_logger from fvcore.common.file_io import PathManager",
"remesh.faces.reshape((-1, 4))[:, 1:] return remesh if __name__ == \"__main__\": mp.set_start_method(\"spawn\",",
"cubified_meshes = model(img) # Save voxel_score voxel_odir = os.path.join(curr_path, \"voxel_score\")",
"help=\"path to config file\", ) parser.add_argument(\"--input\", help=\"A path to an",
"def setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg",
"not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, \"%s.obj\" % (im_name)) verts,",
"= torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model",
"args = get_parser().parse_args() device = torch.device(\"cuda:%d\" % 0) logger =",
"ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading model from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT))",
"# Save image features imgfeat_odir = os.path.join(curr_path, \"img_feat\") if not",
"# Save cubified mesh cmesh_odir = os.path.join(curr_path, \"cube_mesh\") if not",
"import imagenet_preprocess from shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch import build_model",
"p_file = os.path.join(p_odir, \"%s.pt\" % (im_name)) torch.save(P, p_file) # Save",
"PIL import Image import trimesh import pyvista as pv import",
"cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg def get_parser():",
"= img.to(device) with inference_context(model): img_feats, voxel_scores, meshes_pred, P, cubified_meshes =",
"mesh_odir = os.path.join(curr_path, \"final_mesh\") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file =",
"in sub_dir: curr_path = os.path.join(args.input, sd) images = glob.glob(curr_path +",
"argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to config file\",",
"= os.path.join(p_odir, \"%s.pt\" % (im_name)) torch.save(P, p_file) # Save cubified",
"= clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device) sub_dir",
"= img[None, :, :, :] img = img.to(device) with inference_context(model):",
"img[None, :, :, :] img = img.to(device) with inference_context(model): img_feats,",
"save_obj(save_file, verts, faces) logger.info(\"Predictions saved for %s/%s\" % (curr_path.split('/')[-1], im_name))",
"provided\") logger.info(\"Loading model from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT)) cp =",
"# load checkpoing and build model if cfg.MODEL.CHECKPOINT == \"\":",
"images: # load image transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform =",
"mp from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import setup_logger from",
"PathManager from pathlib import Path from pytorch3d.io import save_obj from",
"\"%s_cube.obj\" % (im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces)",
"sorted(os.listdir(args.input)) for sd in sub_dir: curr_path = os.path.join(args.input, sd) images",
"as np logger = logging.getLogger('demo') def setup_cfgs(args): cfg = get_shapenet_cfg()",
"mp import logging import os from detectron2.evaluation import inference_context import",
"help=\"Focal length for the image\" ) parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will",
"directory to save output visualizations\") parser.add_argument( \"--focal-length\", type=float, default=20.0, help=\"Focal",
"= cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) # Save predicted mesh mesh_odir",
"shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms as",
"% 0) logger = setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \" + str(args))",
"default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to config file\", ) parser.add_argument(\"--input\", help=\"A path",
"config file\", ) parser.add_argument(\"--input\", help=\"A path to an input main",
"remesh if __name__ == \"__main__\": mp.set_start_method(\"spawn\", force=True) args = get_parser().parse_args()",
"dist import torch.multiprocessing as mp from detectron2.utils.collect_env import collect_env_info from",
"img = transform(img) img = img[None, :, :, :] img",
"with PathManager.open(img_dir, \"rb\") as f: img = Image.open(f).convert(\"RGB\") img =",
"%s\" % (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model",
"import torch import torch.distributed as dist import torch.multiprocessing as mp",
"imgfeat_odir = os.path.join(curr_path, \"img_feat\") if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file =",
"remesh.points # faces = remesh.faces.reshape((-1, 4))[:, 1:] return remesh if",
"= os.path.join(curr_path, \"P\") if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir,",
"Save predicted mesh mesh_odir = os.path.join(curr_path, \"final_mesh\") if not Path(mesh_odir).is_dir():",
"pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh remesh = clus.create_mesh() # verts",
"force=True) args = get_parser().parse_args() device = torch.device(\"cuda:%d\" % 0) logger",
"img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\") as f: img = Image.open(f).convert(\"RGB\") img",
"logger.info(\"Loading model from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))",
"= [T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with",
"default=20.0, help=\"Focal length for the image\" ) parser.add_argument( \"--onlyhighest\", action=\"store_true\",",
"torch.device(\"cuda:%d\" % 0) logger = setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \" +",
"\"rb\") as f: img = Image.open(f).convert(\"RGB\") img = transform(img) img",
"the image\" ) parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will return only the",
"% (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model =",
"model = build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device) sub_dir = sorted(os.listdir(args.input))",
"= argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to config",
"import torch.multiprocessing as mp from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger",
"config options using the command-line\", default=None, nargs=argparse.REMAINDER, ) return parser",
"shapenet.config.config import get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads import",
"build model if cfg.MODEL.CHECKPOINT == \"\": raise ValueError(\"Invalid checkpoing provided\")",
"os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, \"%s.pt\" % (im_name)) torch.save(voxel_scores, voxel_file) #",
"the command-line\", default=None, nargs=argparse.REMAINDER, ) return parser def resample_mesh(mesh, count=2466):",
"= glob.glob(curr_path + \"/*.png\") for img_dir in images: # load",
"visualizations\") parser.add_argument( \"--focal-length\", type=float, default=20.0, help=\"Focal length for the image\"",
"model from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict",
"input main folder\") # parser.add_argument(\"--output\", help=\"A directory to save output",
"parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to config file\", ) parser.add_argument(\"--input\",",
"\"%s.pt\" % (im_name)) torch.save(img_feats, img_feat_file) # Save P p_odir =",
"os.path.join(voxel_odir, \"%s.pt\" % (im_name)) torch.save(voxel_scores, voxel_file) # Save image features",
"using the command-line\", default=None, nargs=argparse.REMAINDER, ) return parser def resample_mesh(mesh,",
"# parser.add_argument(\"--output\", help=\"A directory to save output visualizations\") parser.add_argument( \"--focal-length\",",
"from detectron2.utils.logger import setup_logger from fvcore.common.file_io import PathManager from pathlib",
"(im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info(\"Predictions saved",
"if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, \"%s.obj\" % (im_name))",
"verts = remesh.points # faces = remesh.faces.reshape((-1, 4))[:, 1:] return",
"import os from detectron2.evaluation import inference_context import torch import torch.distributed",
"im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\") as f: img =",
"os.path.join(curr_path, \"P\") if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir, \"%s.pt\"",
"the highest scoring detection\" ) parser.add_argument( \"opts\", help=\"Modify model config",
"setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \" + str(args)) cfg = setup_cfgs(args) #",
"as pv import pyacvd import numpy as np logger =",
"os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name)) c_verts, c_faces =",
"import inference_context import torch import torch.distributed as dist import torch.multiprocessing",
"4))[:, 1:] return remesh if __name__ == \"__main__\": mp.set_start_method(\"spawn\", force=True)",
"torch import torch.distributed as dist import torch.multiprocessing as mp from",
"meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info(\"Predictions saved for %s/%s\" % (curr_path.split('/')[-1],",
"import torch.distributed as dist import torch.multiprocessing as mp from detectron2.utils.collect_env",
"pytorch3d.io import save_obj from shapenet.config.config import get_shapenet_cfg from shapenet.data.utils import",
"help=\"A path to an input main folder\") # parser.add_argument(\"--output\", help=\"A",
"\"--onlyhighest\", action=\"store_true\", help=\"will return only the highest scoring detection\" )",
"= os.path.join(mesh_odir, \"%s.obj\" % (im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file,",
"trimesh import pyvista as pv import pyacvd import numpy as",
"= sorted(os.listdir(args.input)) for sd in sub_dir: curr_path = os.path.join(args.input, sd)",
"import pyvista as pv import pyacvd import numpy as np",
"cmesh_odir = os.path.join(curr_path, \"cube_mesh\") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file =",
"= remesh.faces.reshape((-1, 4))[:, 1:] return remesh if __name__ == \"__main__\":",
"voxel_score voxel_odir = os.path.join(curr_path, \"voxel_score\") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file",
"state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device)",
") parser.add_argument( \"opts\", help=\"Modify model config options using the command-line\",",
"pathlib import Path from pytorch3d.io import save_obj from shapenet.config.config import",
"str(args)) cfg = setup_cfgs(args) # load checkpoing and build model",
"logger = logging.getLogger('demo') def setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts)",
"np logger = logging.getLogger('demo') def setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file)",
"cfg = setup_cfgs(args) # load checkpoing and build model if",
"(im_name)) torch.save(P, p_file) # Save cubified mesh cmesh_odir = os.path.join(curr_path,",
"= transform(img) img = img[None, :, :, :] img =",
"(im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) # Save",
"= meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info(\"Predictions saved for %s/%s\" %",
"== \"\": raise ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading model from checkpoint:",
"parser.add_argument(\"--output\", help=\"A directory to save output visualizations\") parser.add_argument( \"--focal-length\", type=float,",
"model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device) sub_dir = sorted(os.listdir(args.input)) for sd in",
"get_parser().parse_args() device = torch.device(\"cuda:%d\" % 0) logger = setup_logger(name=\"demo shapenet\")",
"fvcore.common.file_io import PathManager from pathlib import Path from pytorch3d.io import",
"if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" % (im_name))",
"raise ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading model from checkpoint: %s\" %",
"P, cubified_meshes = model(img) # Save voxel_score voxel_odir = os.path.join(curr_path,",
"folder\") # parser.add_argument(\"--output\", help=\"A directory to save output visualizations\") parser.add_argument(",
"get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg def get_parser(): parser =",
"= os.path.join(curr_path, \"voxel_score\") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir,",
"predicted mesh mesh_odir = os.path.join(curr_path, \"final_mesh\") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir)",
"highest scoring detection\" ) parser.add_argument( \"opts\", help=\"Modify model config options",
"if cfg.MODEL.CHECKPOINT == \"\": raise ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading model",
"Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir, \"%s.pt\" % (im_name)) torch.save(P, p_file)",
"Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to config file\", )",
"image transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name =",
"os.path.join(mesh_odir, \"%s.obj\" % (im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts,",
"load image transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name",
"os.path.join(imgfeat_odir, \"%s.pt\" % (im_name)) torch.save(img_feats, img_feat_file) # Save P p_odir",
"import build_model from shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms as T",
"os.path.join(args.input, sd) images = glob.glob(curr_path + \"/*.png\") for img_dir in",
"P p_odir = os.path.join(curr_path, \"P\") if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file",
"transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0]",
"c_faces) # Save predicted mesh mesh_odir = os.path.join(curr_path, \"final_mesh\") if",
"torch.save(P, p_file) # Save cubified mesh cmesh_odir = os.path.join(curr_path, \"cube_mesh\")",
"clus.cluster(count) # remesh remesh = clus.create_mesh() # verts = remesh.points",
") parser.add_argument(\"--input\", help=\"A path to an input main folder\") #",
"device = torch.device(\"cuda:%d\" % 0) logger = setup_logger(name=\"demo shapenet\") logger.info(\"Arguments:",
"\"/*.png\") for img_dir in images: # load image transform =",
"\"opts\", help=\"Modify model config options using the command-line\", default=None, nargs=argparse.REMAINDER,",
"img_feat_file) # Save P p_odir = os.path.join(curr_path, \"P\") if not",
"for img_dir in images: # load image transform = [T.ToTensor()]",
"import numpy as np logger = logging.getLogger('demo') def setup_cfgs(args): cfg",
"image\" ) parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will return only the highest",
"default=None, nargs=argparse.REMAINDER, ) return parser def resample_mesh(mesh, count=2466): pv_mesh =",
"if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, \"%s.pt\" % (im_name))",
"= get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg def get_parser(): parser",
"model config options using the command-line\", default=None, nargs=argparse.REMAINDER, ) return",
"= os.path.join(curr_path, \"cube_mesh\") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir,",
"checkpoing and build model if cfg.MODEL.CHECKPOINT == \"\": raise ValueError(\"Invalid",
"cfg def get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\",",
"faces = remesh.faces.reshape((-1, 4))[:, 1:] return remesh if __name__ ==",
"mesh cmesh_odir = os.path.join(curr_path, \"cube_mesh\") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file",
"voxel_file = os.path.join(voxel_odir, \"%s.pt\" % (im_name)) torch.save(voxel_scores, voxel_file) # Save",
"img = Image.open(f).convert(\"RGB\") img = transform(img) img = img[None, :,",
"PathManager.open(img_dir, \"rb\") as f: img = Image.open(f).convert(\"RGB\") img = transform(img)",
"parser def resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh) # logger.info('Original mesh:')",
"# Save P p_odir = os.path.join(curr_path, \"P\") if not Path(p_odir).is_dir():",
"import logging import multiprocessing as mp import logging import os",
"from shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch",
"features imgfeat_odir = os.path.join(curr_path, \"img_feat\") if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file",
"cfg.freeze() return cfg def get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument(",
"\"final_mesh\") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, \"%s.obj\" %",
"parser.add_argument( \"--focal-length\", type=float, default=20.0, help=\"Focal length for the image\" )",
"model(img) # Save voxel_score voxel_odir = os.path.join(curr_path, \"voxel_score\") if not",
"detection\" ) parser.add_argument( \"opts\", help=\"Modify model config options using the",
"voxel_scores, meshes_pred, P, cubified_meshes = model(img) # Save voxel_score voxel_odir",
"img = img[None, :, :, :] img = img.to(device) with",
"from detectron2.evaluation import inference_context import torch import torch.distributed as dist",
"if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name))",
"from shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms",
"pv import pyacvd import numpy as np logger = logging.getLogger('demo')",
"shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch import",
"parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path to",
"pv.wrap(mesh) # logger.info('Original mesh:') # print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3)",
"% (im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info(\"Predictions",
"command-line\", default=None, nargs=argparse.REMAINDER, ) return parser def resample_mesh(mesh, count=2466): pv_mesh",
"img = img.to(device) with inference_context(model): img_feats, voxel_scores, meshes_pred, P, cubified_meshes",
"setup_logger from fvcore.common.file_io import PathManager from pathlib import Path from",
"clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh remesh = clus.create_mesh()",
"= logging.getLogger('demo') def setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze()",
"\"%s.pt\" % (im_name)) torch.save(voxel_scores, voxel_file) # Save image features imgfeat_odir",
"logging.getLogger('demo') def setup_cfgs(args): cfg = get_shapenet_cfg() cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return",
"help=\"will return only the highest scoring detection\" ) parser.add_argument( \"opts\",",
"numpy as np logger = logging.getLogger('demo') def setup_cfgs(args): cfg =",
"\"%s.pt\" % (im_name)) torch.save(P, p_file) # Save cubified mesh cmesh_odir",
"import pyacvd import numpy as np logger = logging.getLogger('demo') def",
"\"img_feat\") if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" %",
"argparse import logging import multiprocessing as mp import logging import",
"T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\") as f: img",
"import setup_logger from fvcore.common.file_io import PathManager from pathlib import Path",
"return cfg def get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\",",
"setup_cfgs(args) # load checkpoing and build model if cfg.MODEL.CHECKPOINT ==",
"# verts = remesh.points # faces = remesh.faces.reshape((-1, 4))[:, 1:]",
"not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" % (im_name)) torch.save(img_feats,",
"= pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh remesh = clus.create_mesh() #",
":] img = img.to(device) with inference_context(model): img_feats, voxel_scores, meshes_pred, P,",
"Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, \"%s.obj\" % (im_name)) verts, faces",
"get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads import voxel_head from",
":, :, :] img = img.to(device) with inference_context(model): img_feats, voxel_scores,",
") parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will return only the highest scoring",
"shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms as T import glob from",
"from pytorch3d.io import save_obj from shapenet.config.config import get_shapenet_cfg from shapenet.data.utils",
"% (im_name)) torch.save(P, p_file) # Save cubified mesh cmesh_odir =",
"glob from PIL import Image import trimesh import pyvista as",
"main folder\") # parser.add_argument(\"--output\", help=\"A directory to save output visualizations\")",
"as mp from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import setup_logger",
"T import glob from PIL import Image import trimesh import",
"torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\")",
"= os.path.join(voxel_odir, \"%s.pt\" % (im_name)) torch.save(voxel_scores, voxel_file) # Save image",
"get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\", help=\"path",
"clus.create_mesh() # verts = remesh.points # faces = remesh.faces.reshape((-1, 4))[:,",
"def resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh) # logger.info('Original mesh:') #",
"logger = setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \" + str(args)) cfg =",
"import argparse import logging import multiprocessing as mp import logging",
"cubified mesh cmesh_odir = os.path.join(curr_path, \"cube_mesh\") if not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir)",
"cfg.merge_from_list(args.opts) cfg.freeze() return cfg def get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\")",
"multiprocessing as mp import logging import os from detectron2.evaluation import",
"= Image.open(f).convert(\"RGB\") img = transform(img) img = img[None, :, :,",
"build_model from shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms as T import",
"(im_name)) torch.save(voxel_scores, voxel_file) # Save image features imgfeat_odir = os.path.join(curr_path,",
"= setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \" + str(args)) cfg = setup_cfgs(args)",
"c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) # Save predicted mesh",
"for the image\" ) parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will return only",
"faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info(\"Predictions saved for %s/%s\"",
"logging import os from detectron2.evaluation import inference_context import torch import",
"and build model if cfg.MODEL.CHECKPOINT == \"\": raise ValueError(\"Invalid checkpoing",
"import save_obj from shapenet.config.config import get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess",
"(cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg)",
"mp.set_start_method(\"spawn\", force=True) args = get_parser().parse_args() device = torch.device(\"cuda:%d\" % 0)",
"voxel_file) # Save image features imgfeat_odir = os.path.join(curr_path, \"img_feat\") if",
"= os.path.join(imgfeat_odir, \"%s.pt\" % (im_name)) torch.save(img_feats, img_feat_file) # Save P",
"from pathlib import Path from pytorch3d.io import save_obj from shapenet.config.config",
"file\", ) parser.add_argument(\"--input\", help=\"A path to an input main folder\")",
"with inference_context(model): img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img) #",
"transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\")",
"\"%s.obj\" % (im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces)",
"Save voxel_score voxel_odir = os.path.join(curr_path, \"voxel_score\") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir)",
"os.path.join(curr_path, \"voxel_score\") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, \"%s.pt\"",
"if __name__ == \"__main__\": mp.set_start_method(\"spawn\", force=True) args = get_parser().parse_args() device",
"sub_dir = sorted(os.listdir(args.input)) for sd in sub_dir: curr_path = os.path.join(args.input,",
"= os.path.join(args.input, sd) images = glob.glob(curr_path + \"/*.png\") for img_dir",
"collect_env_info from detectron2.utils.logger import setup_logger from fvcore.common.file_io import PathManager from",
"+ \"/*.png\") for img_dir in images: # load image transform",
"def get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN Demo\") parser.add_argument( \"--config-file\", default=\"configs/shapenet/voxmesh_R50.yaml\", metavar=\"FILE\",",
"\"voxel_score\") if not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, \"%s.pt\" %",
"help=\"A directory to save output visualizations\") parser.add_argument( \"--focal-length\", type=float, default=20.0,",
"torchvision.transforms as T import glob from PIL import Image import",
"load checkpoing and build model if cfg.MODEL.CHECKPOINT == \"\": raise",
"import multiprocessing as mp import logging import os from detectron2.evaluation",
"build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device) sub_dir = sorted(os.listdir(args.input)) for sd",
"loaded\") model.to(device) sub_dir = sorted(os.listdir(args.input)) for sd in sub_dir: curr_path",
"clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device) sub_dir =",
"img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img) # Save voxel_score",
"nargs=argparse.REMAINDER, ) return parser def resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh)",
"Image.open(f).convert(\"RGB\") img = transform(img) img = img[None, :, :, :]",
"shapenet\") logger.info(\"Arguments: \" + str(args)) cfg = setup_cfgs(args) # load",
"parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will return only the highest scoring detection\"",
"logger.info(\"Model loaded\") model.to(device) sub_dir = sorted(os.listdir(args.input)) for sd in sub_dir:",
"# load image transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform)",
"(im_name)) torch.save(img_feats, img_feat_file) # Save P p_odir = os.path.join(curr_path, \"P\")",
"clus.subdivide(3) clus.cluster(count) # remesh remesh = clus.create_mesh() # verts =",
"f: img = Image.open(f).convert(\"RGB\") img = transform(img) img = img[None,",
"os.path.join(curr_path, \"final_mesh\") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir, \"%s.obj\"",
"0) logger = setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \" + str(args)) cfg",
"image features imgfeat_odir = os.path.join(curr_path, \"img_feat\") if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir)",
"= pv.wrap(mesh) # logger.info('Original mesh:') # print(pv_mesh) clus = pyacvd.Clustering(pv_mesh)",
"return parser def resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh) # logger.info('Original",
"cfg.MODEL.CHECKPOINT == \"\": raise ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading model from",
"in images: # load image transform = [T.ToTensor()] transform.append(imagenet_preprocess()) transform",
"os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" % (im_name)) torch.save(img_feats, img_feat_file) #",
"cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) # Save predicted mesh mesh_odir =",
"from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import setup_logger from fvcore.common.file_io",
"= clus.create_mesh() # verts = remesh.points # faces = remesh.faces.reshape((-1,",
"img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" % (im_name)) torch.save(img_feats, img_feat_file) # Save",
"% (im_name)) torch.save(img_feats, img_feat_file) # Save P p_odir = os.path.join(curr_path,",
"from fvcore.common.file_io import PathManager from pathlib import Path from pytorch3d.io",
"torch.save(voxel_scores, voxel_file) # Save image features imgfeat_odir = os.path.join(curr_path, \"img_feat\")",
"curr_path = os.path.join(args.input, sd) images = glob.glob(curr_path + \"/*.png\") for",
"# faces = remesh.faces.reshape((-1, 4))[:, 1:] return remesh if __name__",
"as mp import logging import os from detectron2.evaluation import inference_context",
"remesh remesh = clus.create_mesh() # verts = remesh.points # faces",
"pv_mesh = pv.wrap(mesh) # logger.info('Original mesh:') # print(pv_mesh) clus =",
"img_dir in images: # load image transform = [T.ToTensor()] transform.append(imagenet_preprocess())",
"inference_context import torch import torch.distributed as dist import torch.multiprocessing as",
"sd in sub_dir: curr_path = os.path.join(args.input, sd) images = glob.glob(curr_path",
"= os.path.join(curr_path, \"final_mesh\") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file = os.path.join(mesh_odir,",
"sub_dir: curr_path = os.path.join(args.input, sd) images = glob.glob(curr_path + \"/*.png\")",
"torch.distributed as dist import torch.multiprocessing as mp from detectron2.utils.collect_env import",
"1:] return remesh if __name__ == \"__main__\": mp.set_start_method(\"spawn\", force=True) args",
"checkpoing provided\") logger.info(\"Loading model from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT)) cp",
"mesh:') # print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh",
"Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir, \"%s.pt\" % (im_name)) torch.save(img_feats, img_feat_file)",
"import voxel_head from shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint import clean_state_dict",
"only the highest scoring detection\" ) parser.add_argument( \"opts\", help=\"Modify model",
"logger.info('Original mesh:') # print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) #",
"clean_state_dict import torchvision.transforms as T import glob from PIL import",
"import glob from PIL import Image import trimesh import pyvista",
"options using the command-line\", default=None, nargs=argparse.REMAINDER, ) return parser def",
"# print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count) # remesh remesh",
"== \"__main__\": mp.set_start_method(\"spawn\", force=True) args = get_parser().parse_args() device = torch.device(\"cuda:%d\"",
"Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name)) c_verts, c_faces",
"images = glob.glob(curr_path + \"/*.png\") for img_dir in images: #",
"import trimesh import pyvista as pv import pyacvd import numpy",
"= img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\") as f: img = Image.open(f).convert(\"RGB\")",
"count=2466): pv_mesh = pv.wrap(mesh) # logger.info('Original mesh:') # print(pv_mesh) clus",
"# remesh remesh = clus.create_mesh() # verts = remesh.points #",
"not Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, \"%s.pt\" % (im_name)) torch.save(voxel_scores,",
"torch.save(img_feats, img_feat_file) # Save P p_odir = os.path.join(curr_path, \"P\") if",
"\"--focal-length\", type=float, default=20.0, help=\"Focal length for the image\" ) parser.add_argument(",
"detectron2.utils.logger import setup_logger from fvcore.common.file_io import PathManager from pathlib import",
"import get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads import voxel_head",
"voxel_head from shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint import clean_state_dict import",
"verts, faces = meshes_pred[-1].get_mesh_verts_faces(0) save_obj(save_file, verts, faces) logger.info(\"Predictions saved for",
"from shapenet.utils.checkpoint import clean_state_dict import torchvision.transforms as T import glob",
"os.mkdir(p_odir) p_file = os.path.join(p_odir, \"%s.pt\" % (im_name)) torch.save(P, p_file) #",
"not Path(cmesh_odir).is_dir(): os.mkdir(cmesh_odir) cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name)) c_verts,",
"# Save predicted mesh mesh_odir = os.path.join(curr_path, \"final_mesh\") if not",
"Save P p_odir = os.path.join(curr_path, \"P\") if not Path(p_odir).is_dir(): os.mkdir(p_odir)",
"# Save voxel_score voxel_odir = os.path.join(curr_path, \"voxel_score\") if not Path(voxel_odir).is_dir():",
"to save output visualizations\") parser.add_argument( \"--focal-length\", type=float, default=20.0, help=\"Focal length",
"save_file = os.path.join(mesh_odir, \"%s.obj\" % (im_name)) verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)",
"import torchvision.transforms as T import glob from PIL import Image",
"% (im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts, c_faces) #",
"= build_model(cfg) model.load_state_dict(state_dict) logger.info(\"Model loaded\") model.to(device) sub_dir = sorted(os.listdir(args.input)) for",
"\"P\") if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir, \"%s.pt\" %",
"if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir, \"%s.pt\" % (im_name))",
"\"\": raise ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading model from checkpoint: %s\"",
"[T.ToTensor()] transform.append(imagenet_preprocess()) transform = T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir,",
"os.path.join(p_odir, \"%s.pt\" % (im_name)) torch.save(P, p_file) # Save cubified mesh",
"as f: img = Image.open(f).convert(\"RGB\") img = transform(img) img =",
"Image import trimesh import pyvista as pv import pyacvd import",
"model if cfg.MODEL.CHECKPOINT == \"\": raise ValueError(\"Invalid checkpoing provided\") logger.info(\"Loading",
"meshes_pred, P, cubified_meshes = model(img) # Save voxel_score voxel_odir =",
"metavar=\"FILE\", help=\"path to config file\", ) parser.add_argument(\"--input\", help=\"A path to",
"save_obj(cube_mesh_file, c_verts, c_faces) # Save predicted mesh mesh_odir = os.path.join(curr_path,",
"p_odir = os.path.join(curr_path, \"P\") if not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file =",
"type=float, default=20.0, help=\"Focal length for the image\" ) parser.add_argument( \"--onlyhighest\",",
":, :] img = img.to(device) with inference_context(model): img_feats, voxel_scores, meshes_pred,",
"resample_mesh(mesh, count=2466): pv_mesh = pv.wrap(mesh) # logger.info('Original mesh:') # print(pv_mesh)",
"shapenet.modeling.heads import voxel_head from shapenet.modeling.mesh_arch import build_model from shapenet.utils.checkpoint import",
"transform = T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\") as",
"cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict = clean_state_dict(cp[\"best_states\"][\"model\"]) model = build_model(cfg) model.load_state_dict(state_dict)",
"sd) images = glob.glob(curr_path + \"/*.png\") for img_dir in images:",
"an input main folder\") # parser.add_argument(\"--output\", help=\"A directory to save",
"as T import glob from PIL import Image import trimesh",
"= remesh.points # faces = remesh.faces.reshape((-1, 4))[:, 1:] return remesh",
"Save cubified mesh cmesh_odir = os.path.join(curr_path, \"cube_mesh\") if not Path(cmesh_odir).is_dir():",
"from PIL import Image import trimesh import pyvista as pv",
"= T.Compose(transform) im_name = img_dir.split(\"/\")[-1].split(\".\")[0] with PathManager.open(img_dir, \"rb\") as f:",
"to an input main folder\") # parser.add_argument(\"--output\", help=\"A directory to",
"= os.path.join(curr_path, \"img_feat\") if not Path(imgfeat_odir).is_dir(): os.mkdir(imgfeat_odir) img_feat_file = os.path.join(imgfeat_odir,",
"mesh mesh_odir = os.path.join(curr_path, \"final_mesh\") if not Path(mesh_odir).is_dir(): os.mkdir(mesh_odir) save_file",
"import logging import os from detectron2.evaluation import inference_context import torch",
"+ str(args)) cfg = setup_cfgs(args) # load checkpoing and build",
"Path(voxel_odir).is_dir(): os.mkdir(voxel_odir) voxel_file = os.path.join(voxel_odir, \"%s.pt\" % (im_name)) torch.save(voxel_scores, voxel_file)",
"= setup_cfgs(args) # load checkpoing and build model if cfg.MODEL.CHECKPOINT",
"pyvista as pv import pyacvd import numpy as np logger",
"length for the image\" ) parser.add_argument( \"--onlyhighest\", action=\"store_true\", help=\"will return",
"scoring detection\" ) parser.add_argument( \"opts\", help=\"Modify model config options using",
"parser.add_argument(\"--input\", help=\"A path to an input main folder\") # parser.add_argument(\"--output\",",
"as dist import torch.multiprocessing as mp from detectron2.utils.collect_env import collect_env_info",
"__name__ == \"__main__\": mp.set_start_method(\"spawn\", force=True) args = get_parser().parse_args() device =",
"help=\"Modify model config options using the command-line\", default=None, nargs=argparse.REMAINDER, )",
"os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file, c_verts,",
"= model(img) # Save voxel_score voxel_odir = os.path.join(curr_path, \"voxel_score\") if",
"= get_parser().parse_args() device = torch.device(\"cuda:%d\" % 0) logger = setup_logger(name=\"demo",
"model.to(device) sub_dir = sorted(os.listdir(args.input)) for sd in sub_dir: curr_path =",
"= torch.device(\"cuda:%d\" % 0) logger = setup_logger(name=\"demo shapenet\") logger.info(\"Arguments: \"",
"cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() return cfg def get_parser(): parser = argparse.ArgumentParser(description=\"MeshRCNN",
"action=\"store_true\", help=\"will return only the highest scoring detection\" ) parser.add_argument(",
"# logger.info('Original mesh:') # print(pv_mesh) clus = pyacvd.Clustering(pv_mesh) clus.subdivide(3) clus.cluster(count)",
"cube_mesh_file = os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)",
"save output visualizations\") parser.add_argument( \"--focal-length\", type=float, default=20.0, help=\"Focal length for",
"return remesh if __name__ == \"__main__\": mp.set_start_method(\"spawn\", force=True) args =",
"= os.path.join(cmesh_odir, \"%s_cube.obj\" % (im_name)) c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0) save_obj(cube_mesh_file,",
"\"__main__\": mp.set_start_method(\"spawn\", force=True) args = get_parser().parse_args() device = torch.device(\"cuda:%d\" %",
"for sd in sub_dir: curr_path = os.path.join(args.input, sd) images =",
"c_verts, c_faces) # Save predicted mesh mesh_odir = os.path.join(curr_path, \"final_mesh\")",
"remesh = clus.create_mesh() # verts = remesh.points # faces =",
"import PathManager from pathlib import Path from pytorch3d.io import save_obj",
"glob.glob(curr_path + \"/*.png\") for img_dir in images: # load image",
"not Path(p_odir).is_dir(): os.mkdir(p_odir) p_file = os.path.join(p_odir, \"%s.pt\" % (im_name)) torch.save(P,",
"pyacvd import numpy as np logger = logging.getLogger('demo') def setup_cfgs(args):",
"p_file) # Save cubified mesh cmesh_odir = os.path.join(curr_path, \"cube_mesh\") if",
"import Path from pytorch3d.io import save_obj from shapenet.config.config import get_shapenet_cfg",
"output visualizations\") parser.add_argument( \"--focal-length\", type=float, default=20.0, help=\"Focal length for the",
"img.to(device) with inference_context(model): img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)",
"\" + str(args)) cfg = setup_cfgs(args) # load checkpoing and",
"import clean_state_dict import torchvision.transforms as T import glob from PIL",
"torch.multiprocessing as mp from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import",
"from shapenet.config.config import get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess from shapenet.modeling.heads",
"from checkpoint: %s\" % (cfg.MODEL.CHECKPOINT)) cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT)) state_dict =",
"Save image features imgfeat_odir = os.path.join(curr_path, \"img_feat\") if not Path(imgfeat_odir).is_dir():",
"transform(img) img = img[None, :, :, :] img = img.to(device)",
"save_obj from shapenet.config.config import get_shapenet_cfg from shapenet.data.utils import imagenet_preprocess from"
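resample_mesh is defined in the script above but never invoked there. The following is a minimal, hypothetical sketch of how it could be applied to one of the saved predictions; the file paths and the use of trimesh/PolyData.save are assumptions for illustration, not part of the original script.

import trimesh
import pyvista as pv  # resample_mesh returns a pyvista.PolyData

# Hypothetical: load one of the .obj meshes written by the demo loop above.
mesh = trimesh.load("final_mesh/example.obj")

# Cluster-based remeshing to a roughly fixed vertex budget (pyacvd under the hood).
remesh = resample_mesh(mesh, count=2466)
print(remesh.n_points)  # approximately 2466 uniformly distributed vertices

# PolyData supports .ply/.vtk/.stl output; .obj would need a separate writer.
remesh.save("final_mesh/example_resampled.ply")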
"np.mean(y) for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0]",
"y) self._is_fitted = True return self def vote(self, X): \"\"\"",
"j in range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) #",
"here. \"\"\" self.finite_sample_correction = finite_sample_correction self._is_fitted = False self.multilabel =",
"verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted = True return self def",
"self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted =",
"np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction: posteriors",
"/ num_classes self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf",
"self.fit_multilabel(X, y) num_classes = len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) / num_classes",
"= keras.Sequential() self.voter.add( layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", ) )",
"here. \"\"\" return self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\" Doc",
"or binary probabilities. self.leaf_to_posterior = {} for leaf_id in np.unique(X):",
"strings here. \"\"\" X, y = check_X_y(X, y) self.voter =",
"epochs=100, lr=1e-4, verbose=False, ): \"\"\" Doc strings here. \"\"\" self.validation_split",
"= {} self.global_yhat = np.mean(y) for leaf_id in np.unique(X): idxs_in_leaf",
"== y_val)[0]) for y_val in np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts)",
"for x in X: if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else:",
"% {\"name\": type(self).__name__}) X = check_array(X) return self.knn.predict_proba(X) def is_fitted(self):",
"\"\"\" Doc strings here. \"\"\" return self._is_fitted class TreeRegressionVoter(BaseVoter): def",
"kwargs def fit(self, X, y): \"\"\" Doc strings here. \"\"\"",
"return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\" encourage posteriors to",
"num_classes): \"\"\" encourage posteriors to approach uniform when there is",
"self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False, ): \"\"\" Doc strings",
"return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings here. \"\"\" return",
"votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings here. \"\"\"",
"self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\" Doc strings here. \"\"\"",
"in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return self",
"if self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id]",
") raise NotFittedError(msg % {\"name\": type(self).__name__}) votes_per_example = [] for",
"_finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\" encourage posteriors to approach uniform when",
"np.sum(label_counts)) # TODO: multilabel finite sample correction. self.leaf_to_posterior[leaf_id] = posteriors",
"1)[0]) for j in range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts) /",
"[] for x in X: if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x])",
"else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings here.",
"self.multilabel = False def fit(self, X, y): \"\"\" Doc strings",
"finite_sample_correction=False): \"\"\" Doc strings here. \"\"\" self.finite_sample_correction = finite_sample_correction self._is_fitted",
"if not self.is_fitted(): msg = ( \"This %(name)s instance is",
"is low data \"\"\" correction_constant = 1 / (num_classes *",
"= np.where(X == leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf, j] ==",
"posteriors self._is_fitted = True return self def fit_multilabel(self, X, y):",
"layers class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): \"\"\" Doc strings here.",
"return self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25, loss=\"mse\", epochs=100,",
"self.uniform_posterior = np.ones(num_classes) / num_classes self.leaf_to_posterior = {} for leaf_id",
"here. \"\"\" self.leaf_to_yhat = {} self.global_yhat = np.mean(y) for leaf_id",
"arguments before using this voter.\" ) raise NotFittedError(msg % {\"name\":",
"/ np.sum(label_counts)) # TODO: multilabel finite sample correction. self.leaf_to_posterior[leaf_id] =",
"task. self.multilabel = True return self.fit_multilabel(X, y) num_classes = len(np.unique(y))",
"= lr self.verbose = verbose self._is_fitted = False def fit(self,",
"[ len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y) ] posteriors",
"self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf = np.where(X",
"% {\"name\": type(self).__name__}) X = check_array(X) return self.voter.predict(X) def is_fitted(self):",
"import layers class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): \"\"\" Doc strings",
"= 1 / (num_classes * num_points_in_partition) zero_posterior_idxs = np.where(posteriors ==",
"name=\"transform_to_vote\", ) ) self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X,",
"in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] class_counts = [",
"\" \"appropriate arguments before using this voter.\" ) raise NotFittedError(msg",
"self.kwargs = kwargs def fit(self, X, y): \"\"\" Doc strings",
"y_val in np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if",
"when there is low data \"\"\" correction_constant = 1 /",
"\"\"\" self._is_fitted = False def fit(self, X, y): \"\"\" Doc",
"before using this transformer.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__})",
"loss self.epochs = epochs self.lr = lr self.verbose = verbose",
"posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True",
"self.lr = lr self.verbose = verbose self._is_fitted = False def",
"is not fitted yet. Call 'fit' with \" \"appropriate arguments",
"TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\" Doc strings here. \"\"\" self._is_fitted =",
"= [] for x in X: if x in list(self.leaf_to_posterior.keys()):",
"import BaseVoter from tensorflow import keras from keras import layers",
"votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings",
"Each posterior is now a num_labels size vector or binary",
"posteriors class KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}): \"\"\" Doc strings",
"= np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO: multilabel finite sample correction.",
"using this voter.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__}) votes_per_example",
"def is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted class",
"y): \"\"\" Doc strings here. \"\"\" check_classification_targets(y) if type_of_target(y) ==",
"y_val)[0]) for y_val in np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts) /",
"class TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\" Doc strings here. \"\"\" self._is_fitted",
"True return self def vote(self, X): \"\"\" Doc strings here.",
"# class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)]",
"= check_array(X) return self.voter.predict(X) def is_fitted(self): \"\"\" Doc strings here.",
"\"\"\" Doc strings here. \"\"\" self.validation_split = validation_split self.loss =",
"check_array(X) return self.voter.predict(X) def is_fitted(self): \"\"\" Doc strings here. \"\"\"",
"# Fit multilabel binary task. self.multilabel = True return self.fit_multilabel(X,",
"\"\"\" Doc strings here. \"\"\" self.leaf_to_yhat = {} self.global_yhat =",
"% {\"name\": type(self).__name__}) votes_per_example = [] for x in X:",
"y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return",
"from tensorflow import keras from keras import layers class TreeClassificationVoter(BaseVoter):",
"now a num_labels size vector or binary probabilities. self.leaf_to_posterior =",
"binary task. self.multilabel = True return self.fit_multilabel(X, y) num_classes =",
"is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted def _finite_sample_correction(posteriors,",
"def fit_multilabel(self, X, y): num_labels = y.shape[1] self.uniform_posterior = y.sum(axis=0)",
"1 / (num_classes * num_points_in_partition) zero_posterior_idxs = np.where(posteriors == 0)[0]",
"using this transformer.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__}) X",
"idxs_in_leaf = np.where(X == leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf] ==",
"] posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO: multilabel finite",
"NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False, ):",
"check_X_y, check_array, NotFittedError, ) from sklearn.utils.multiclass import check_classification_targets, type_of_target from",
"= k self.kwargs = kwargs def fit(self, X, y): \"\"\"",
"# from sklearn.ensemble import BaggingClassifier # from sklearn.tree import DecisionTreeClassifier",
"def __init__(self, k, kwargs={}): \"\"\" Doc strings here. \"\"\" self._is_fitted",
"self._is_fitted = False self.multilabel = False def fit(self, X, y):",
".base import BaseVoter from tensorflow import keras from keras import",
"Doc strings here. \"\"\" X, y = check_X_y(X, y) self.voter",
"type(self).__name__}) X = check_array(X) return self.knn.predict_proba(X) def is_fitted(self): \"\"\" Doc",
"list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc",
"from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation",
"vector or binary probabilities. self.leaf_to_posterior = {} for leaf_id in",
"here. \"\"\" return self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25,",
"import KNeighborsClassifier from sklearn.utils.validation import ( check_X_y, check_array, NotFittedError, )",
"keras import layers class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): \"\"\" Doc",
"strings here. \"\"\" if not self.is_fitted(): msg = ( \"This",
"== y_val)[0]) for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted",
"for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True",
"range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO: multilabel",
"posteriors /= sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter): def __init__(self, k,",
"type_of_target(y) == 'multilabel-indicator': # Fit multilabel binary task. self.multilabel =",
"y) num_classes = len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) / num_classes self.leaf_to_posterior",
"from sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation import ( check_X_y, check_array,",
"in range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO:",
"self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4,",
"0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors /= sum(posteriors) return posteriors class",
"return self.fit_multilabel(X, y) num_classes = len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) /",
"binary probabilities. self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf",
"return self def vote(self, X): \"\"\" Doc strings here. \"\"\"",
"import ( check_X_y, check_array, NotFittedError, ) from sklearn.utils.multiclass import check_classification_targets,",
"class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): \"\"\" Doc strings here. \"\"\"",
"= check_array(X) return self.knn.predict_proba(X) def is_fitted(self): \"\"\" Doc strings here.",
"len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self",
"validation_split self.loss = loss self.epochs = epochs self.lr = lr",
"strings here. \"\"\" return self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__( self,",
"here. \"\"\" X, y = check_X_y(X, y) self.knn = KNeighborsClassifier(self.k,",
"\" \"appropriate arguments before using this transformer.\" ) raise NotFittedError(msg",
"\"\"\" Doc strings here. \"\"\" check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator':",
"for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] class_counts",
"def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\" encourage posteriors to approach uniform",
"= posteriors self._is_fitted = True return self def fit_multilabel(self, X,",
"self._is_fitted = True return self def vote(self, X): \"\"\" Doc",
"np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf]",
"correction_constant = 1 / (num_classes * num_points_in_partition) zero_posterior_idxs = np.where(posteriors",
"= self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted",
"votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings",
"class NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False,",
"X, y): \"\"\" Doc strings here. \"\"\" X, y =",
"self.leaf_to_yhat = {} self.global_yhat = np.mean(y) for leaf_id in np.unique(X):",
"keras.Sequential() self.voter.add( layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", ) ) self.voter.compile(",
"[ len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels) ]",
"== 0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors /= sum(posteriors) return posteriors",
"return self.voter.predict(X) def is_fitted(self): \"\"\" Doc strings here. \"\"\" return",
"y): num_labels = y.shape[1] self.uniform_posterior = y.sum(axis=0) / len(y) #",
"class_counts = [ len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)",
") self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self def",
"strings here. \"\"\" self.leaf_to_yhat = {} self.global_yhat = np.mean(y) for",
"strings here. \"\"\" self.finite_sample_correction = finite_sample_correction self._is_fitted = False self.multilabel",
"== leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf, j] == 1)[0]) for",
"np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO: multilabel finite sample correction. self.leaf_to_posterior[leaf_id]",
"self.global_yhat = np.mean(y) for leaf_id in np.unique(X): idxs_in_leaf = np.where(X",
"if type_of_target(y) == 'multilabel-indicator': # Fit multilabel binary task. self.multilabel",
"input_shape=(X.shape[1],), name=\"transform_to_vote\", ) ) self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit(",
"j] == 1)[0]) for j in range(num_labels) ] posteriors =",
"in np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction:",
"# from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from",
"TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): \"\"\" Doc strings here. \"\"\" self.finite_sample_correction",
"self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self def fit_multilabel(self,",
"= verbose self._is_fitted = False def fit(self, X, y): \"\"\"",
"\"appropriate arguments before using this voter.\" ) raise NotFittedError(msg %",
"finite_sample_correction self._is_fitted = False self.multilabel = False def fit(self, X,",
"np.where(X == leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf] == y_val)[0]) for",
"False self.k = k self.kwargs = kwargs def fit(self, X,",
"here. \"\"\" self._is_fitted = False def fit(self, X, y): \"\"\"",
"uniform when there is low data \"\"\" correction_constant = 1",
"Doc strings here. \"\"\" check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator': #",
"posteriors self._is_fitted = True return self def vote(self, X): \"\"\"",
"a num_labels size vector or binary probabilities. self.leaf_to_posterior = {}",
"multilabel finite sample correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True",
"check_classification_targets, type_of_target from .base import BaseVoter from tensorflow import keras",
"lr=1e-4, verbose=False, ): \"\"\" Doc strings here. \"\"\" self.validation_split =",
"fitted yet. Call 'fit' with \" \"appropriate arguments before using",
"= [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] =",
"TODO: multilabel finite sample correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted =",
"np.ones(num_classes) / num_classes self.leaf_to_posterior = {} for leaf_id in np.unique(X):",
"validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False, ): \"\"\" Doc strings here.",
"%(name)s instance is not fitted yet. Call 'fit' with \"",
"\"\"\" self.validation_split = validation_split self.loss = loss self.epochs = epochs",
"strings here. \"\"\" X, y = check_X_y(X, y) self.knn =",
"sklearn.utils.validation import ( check_X_y, check_array, NotFittedError, ) from sklearn.utils.multiclass import",
"self.validation_split = validation_split self.loss = loss self.epochs = epochs self.lr",
"* num_points_in_partition) zero_posterior_idxs = np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] = correction_constant",
"in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self): \"\"\"",
"type(self).__name__}) votes_per_example = [] for x in X: if x",
"not fitted yet. Call 'fit' with \" \"appropriate arguments before",
"self._is_fitted = False self.k = k self.kwargs = kwargs def",
"len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y) ] posteriors =",
"layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", ) ) self.voter.compile( loss=self.loss, metrics=[\"mae\"],",
"check_X_y(X, y) self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted =",
"leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in",
"low data \"\"\" correction_constant = 1 / (num_classes * num_points_in_partition)",
"strings here. \"\"\" self._is_fitted = False def fit(self, X, y):",
"X = check_array(X) return self.knn.predict_proba(X) def is_fitted(self): \"\"\" Doc strings",
"np # from sklearn.ensemble import BaggingClassifier # from sklearn.tree import",
"def fit(self, X, y): \"\"\" Doc strings here. \"\"\" X,",
"'fit' with \" \"appropriate arguments before using this transformer.\" )",
"Doc strings here. \"\"\" self.leaf_to_yhat = {} self.global_yhat = np.mean(y)",
"sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation import ( check_X_y, check_array, NotFittedError,",
"posteriors to approach uniform when there is low data \"\"\"",
") self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y, epochs=self.epochs,",
"\"\"\" self._is_fitted = False self.k = k self.kwargs = kwargs",
"posteriors[zero_posterior_idxs] = correction_constant posteriors /= sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter):",
"tensorflow import keras from keras import layers class TreeClassificationVoter(BaseVoter): def",
"here. \"\"\" check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator': # Fit multilabel",
"correction_constant posteriors /= sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter): def __init__(self,",
"True return self.fit_multilabel(X, y) num_classes = len(np.unique(y)) self.uniform_posterior = np.ones(num_classes)",
"== 1)[0]) for j in range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts)",
"self def fit_multilabel(self, X, y): num_labels = y.shape[1] self.uniform_posterior =",
"import BaggingClassifier # from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import",
"Fit multilabel binary task. self.multilabel = True return self.fit_multilabel(X, y)",
"instance is not fitted yet. Call 'fit' with \" \"appropriate",
"zero_posterior_idxs = np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors /=",
"def vote(self, X): \"\"\" Doc strings here. \"\"\" if not",
"Doc strings here. \"\"\" if not self.is_fitted(): msg = (",
"x in X: if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior)",
"y = check_X_y(X, y) self.voter = keras.Sequential() self.voter.add( layers.Dense( 1,",
"== 'multilabel-indicator': # Fit multilabel binary task. self.multilabel = True",
"== leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val",
"list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc",
"epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted = True",
"<gh_stars>0 import numpy as np # from sklearn.ensemble import BaggingClassifier",
"y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted =",
"False self.multilabel = False def fit(self, X, y): \"\"\" Doc",
"y) self.voter = keras.Sequential() self.voter.add( layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\",",
"= y.sum(axis=0) / len(y) # Each posterior is now a",
"Doc strings here. \"\"\" return self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__(",
"monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted = True return self",
"check_X_y(X, y) self.voter = keras.Sequential() self.voter.add( layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],),",
"\"This %(name)s instance is not fitted yet. Call 'fit' with",
"== leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val",
"return posteriors class KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}): \"\"\" Doc",
"from keras import layers class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False): \"\"\"",
"posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors",
"NotFittedError, ) from sklearn.utils.multiclass import check_classification_targets, type_of_target from .base import",
"self._is_fitted = False def fit(self, X, y): \"\"\" Doc strings",
"len(y) # Each posterior is now a num_labels size vector",
"yet. Call 'fit' with \" \"appropriate arguments before using this",
"Doc strings here. \"\"\" self._is_fitted = False def fit(self, X,",
"y.shape[1] self.uniform_posterior = y.sum(axis=0) / len(y) # Each posterior is",
"def fit(self, X, y): \"\"\" Doc strings here. \"\"\" check_classification_targets(y)",
"= {} for leaf_id in np.unique(X): idxs_in_leaf = np.where(X ==",
"shuffle=True, ) self._is_fitted = True return self def vote(self, X):",
"fit(self, X, y): \"\"\" Doc strings here. \"\"\" self.leaf_to_yhat =",
"{} self.global_yhat = np.mean(y) for leaf_id in np.unique(X): idxs_in_leaf =",
"self.loss = loss self.epochs = epochs self.lr = lr self.verbose",
"leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] label_counts =",
"from .base import BaseVoter from tensorflow import keras from keras",
"Doc strings here. \"\"\" self.finite_sample_correction = finite_sample_correction self._is_fitted = False",
"Doc strings here. \"\"\" return self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self):",
"lr self.verbose = verbose self._is_fitted = False def fit(self, X,",
"size vector or binary probabilities. self.leaf_to_posterior = {} for leaf_id",
"num_classes = len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) / num_classes self.leaf_to_posterior =",
"self.k = k self.kwargs = kwargs def fit(self, X, y):",
"np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf,",
"\"\"\" check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator': # Fit multilabel binary",
"x in X: if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat)",
"= True return self def vote(self, X): \"\"\" Doc strings",
"NotFittedError(msg % {\"name\": type(self).__name__}) X = check_array(X) return self.voter.predict(X) def",
"\"\"\" X, y = check_X_y(X, y) self.knn = KNeighborsClassifier(self.k, **self.kwargs)",
"strings here. \"\"\" self.validation_split = validation_split self.loss = loss self.epochs",
"k self.kwargs = kwargs def fit(self, X, y): \"\"\" Doc",
"): \"\"\" Doc strings here. \"\"\" self.validation_split = validation_split self.loss",
"BaseVoter from tensorflow import keras from keras import layers class",
"\"\"\" Doc strings here. \"\"\" return self._is_fitted class NeuralRegressionVoter(BaseVoter): def",
"\"\"\" return self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\" Doc strings",
"X, y): \"\"\" Doc strings here. \"\"\" self.leaf_to_yhat = {}",
"X, y): num_labels = y.shape[1] self.uniform_posterior = y.sum(axis=0) / len(y)",
"votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings here. \"\"\"",
"in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] label_counts = [",
"\"\"\" return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\" encourage posteriors",
"= validation_split self.loss = loss self.epochs = epochs self.lr =",
"= np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors /= sum(posteriors)",
"/ (num_classes * num_points_in_partition) zero_posterior_idxs = np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs]",
"leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in",
"= [] for x in X: if x in list(self.leaf_to_yhat.keys()):",
"self.is_fitted(): msg = ( \"This %(name)s instance is not fitted",
"KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted = True return self def",
"1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", ) ) self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr)",
"= loss self.epochs = epochs self.lr = lr self.verbose =",
"strings here. \"\"\" return self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\"",
"import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation import (",
"for j in range(num_labels) ] posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts))",
"num_labels size vector or binary probabilities. self.leaf_to_posterior = {} for",
"__init__( self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False, ): \"\"\" Doc",
"self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, )",
"here. \"\"\" return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\" encourage",
"fit_multilabel(self, X, y): num_labels = y.shape[1] self.uniform_posterior = y.sum(axis=0) /",
"self def vote(self, X): \"\"\" Doc strings here. \"\"\" if",
"X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted",
"multilabel binary task. self.multilabel = True return self.fit_multilabel(X, y) num_classes",
"in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self): \"\"\"",
"class KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}): \"\"\" Doc strings here.",
"X): \"\"\" Doc strings here. \"\"\" if not self.is_fitted(): msg",
"for x in X: if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else:",
"\"\"\" Doc strings here. \"\"\" self.finite_sample_correction = finite_sample_correction self._is_fitted =",
"\"\"\" Doc strings here. \"\"\" if not self.is_fitted(): msg =",
"loss=\"mse\", epochs=100, lr=1e-4, verbose=False, ): \"\"\" Doc strings here. \"\"\"",
"kwargs={}): \"\"\" Doc strings here. \"\"\" self._is_fitted = False self.k",
"keras from keras import layers class TreeClassificationVoter(BaseVoter): def __init__(self, finite_sample_correction=False):",
"np.where(X == leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for",
"for y_val in np.unique(y) ] posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts))",
"idxs_in_leaf = np.where(X == leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf] ==",
"sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}): \"\"\"",
"( check_X_y, check_array, NotFittedError, ) from sklearn.utils.multiclass import check_classification_targets, type_of_target",
"= kwargs def fit(self, X, y): \"\"\" Doc strings here.",
"label_counts = [ len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in",
"y = check_X_y(X, y) self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y)",
"Doc strings here. \"\"\" X, y = check_X_y(X, y) self.knn",
"to approach uniform when there is low data \"\"\" correction_constant",
"False def fit(self, X, y): \"\"\" Doc strings here. \"\"\"",
"Call 'fit' with \" \"appropriate arguments before using this transformer.\"",
"= check_X_y(X, y) self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted",
"def __init__( self, validation_split=0.25, loss=\"mse\", epochs=100, lr=1e-4, verbose=False, ): \"\"\"",
"here. \"\"\" if not self.is_fitted(): msg = ( \"This %(name)s",
"voter.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__}) votes_per_example = []",
"num_points_in_partition, num_classes): \"\"\" encourage posteriors to approach uniform when there",
"\"\"\" Doc strings here. \"\"\" X, y = check_X_y(X, y)",
"arguments before using this transformer.\" ) raise NotFittedError(msg % {\"name\":",
"from sklearn.utils.validation import ( check_X_y, check_array, NotFittedError, ) from sklearn.utils.multiclass",
"return self.knn.predict_proba(X) def is_fitted(self): \"\"\" Doc strings here. \"\"\" return",
"{} for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0]",
"def fit(self, X, y): \"\"\" Doc strings here. \"\"\" self.leaf_to_yhat",
"there is low data \"\"\" correction_constant = 1 / (num_classes",
"= finite_sample_correction self._is_fitted = False self.multilabel = False def fit(self,",
"{\"name\": type(self).__name__}) X = check_array(X) return self.knn.predict_proba(X) def is_fitted(self): \"\"\"",
"[len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf]))",
"X, y): \"\"\" Doc strings here. \"\"\" check_classification_targets(y) if type_of_target(y)",
"= np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors,",
"msg = ( \"This %(name)s instance is not fitted yet.",
"self.voter = keras.Sequential() self.voter.add( layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", )",
"= np.where(X == leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0])",
"= np.ones(num_classes) / num_classes self.leaf_to_posterior = {} for leaf_id in",
"\"\"\" encourage posteriors to approach uniform when there is low",
"leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] class_counts =",
"self.knn.fit(X, y) self._is_fitted = True return self def vote(self, X):",
"with \" \"appropriate arguments before using this voter.\" ) raise",
"(num_classes * num_points_in_partition) zero_posterior_idxs = np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] =",
"NotFittedError(msg % {\"name\": type(self).__name__}) X = check_array(X) return self.knn.predict_proba(X) def",
"self.voter.add( layers.Dense( 1, activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", ) ) self.voter.compile( loss=self.loss,",
"before using this voter.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__})",
"correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self def",
"X, y = check_X_y(X, y) self.voter = keras.Sequential() self.voter.add( layers.Dense(",
"\"\"\" if not self.is_fitted(): msg = ( \"This %(name)s instance",
"self.epochs = epochs self.lr = lr self.verbose = verbose self._is_fitted",
"metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose,",
"this transformer.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__}) X =",
"= KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted = True return self",
"Call 'fit' with \" \"appropriate arguments before using this voter.\"",
"'multilabel-indicator': # Fit multilabel binary task. self.multilabel = True return",
"raise NotFittedError(msg % {\"name\": type(self).__name__}) X = check_array(X) return self.voter.predict(X)",
"if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def",
"sklearn.utils.multiclass import check_classification_targets, type_of_target from .base import BaseVoter from tensorflow",
"type(self).__name__}) X = check_array(X) return self.voter.predict(X) def is_fitted(self): \"\"\" Doc",
"in X: if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return",
") self._is_fitted = True return self def vote(self, X): \"\"\"",
"epochs self.lr = lr self.verbose = verbose self._is_fitted = False",
"X, y = check_X_y(X, y) self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X,",
"self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self def vote(self,",
"votes_per_example = [] for x in X: if x in",
"sample correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return self",
"raise NotFittedError(msg % {\"name\": type(self).__name__}) X = check_array(X) return self.knn.predict_proba(X)",
"= [ len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels)",
"is now a num_labels size vector or binary probabilities. self.leaf_to_posterior",
"= y.shape[1] self.uniform_posterior = y.sum(axis=0) / len(y) # Each posterior",
"fit(self, X, y): \"\"\" Doc strings here. \"\"\" X, y",
") self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True,",
"= ( \"This %(name)s instance is not fitted yet. Call",
"strings here. \"\"\" return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\"",
"self.verbose = verbose self._is_fitted = False def fit(self, X, y):",
"np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf),",
"__init__(self): \"\"\" Doc strings here. \"\"\" self._is_fitted = False def",
") from sklearn.utils.multiclass import check_classification_targets, type_of_target from .base import BaseVoter",
"here. \"\"\" self.validation_split = validation_split self.loss = loss self.epochs =",
"x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self):",
"X: if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example)",
"check_array(X) return self.knn.predict_proba(X) def is_fitted(self): \"\"\" Doc strings here. \"\"\"",
"np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors /= sum(posteriors) return",
"strings here. \"\"\" check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator': # Fit",
"here. \"\"\" self._is_fitted = False self.k = k self.kwargs =",
"KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}): \"\"\" Doc strings here. \"\"\"",
"self.finite_sample_correction = finite_sample_correction self._is_fitted = False self.multilabel = False def",
"self.voter.predict(X) def is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted",
"/ np.sum(class_counts)) if self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y))",
"encourage posteriors to approach uniform when there is low data",
"( \"This %(name)s instance is not fitted yet. Call 'fit'",
"= False def fit(self, X, y): \"\"\" Doc strings here.",
"/ len(y) # Each posterior is now a num_labels size",
"len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j in range(num_labels) ] posteriors",
"y.sum(axis=0) / len(y) # Each posterior is now a num_labels",
"import check_classification_targets, type_of_target from .base import BaseVoter from tensorflow import",
"here. \"\"\" X, y = check_X_y(X, y) self.voter = keras.Sequential()",
"= True return self.fit_multilabel(X, y) num_classes = len(np.unique(y)) self.uniform_posterior =",
"is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted class TreeRegressionVoter(BaseVoter):",
"= np.mean(y) for leaf_id in np.unique(X): idxs_in_leaf = np.where(X ==",
"from sklearn.ensemble import BaggingClassifier # from sklearn.tree import DecisionTreeClassifier from",
"= check_X_y(X, y) self.voter = keras.Sequential() self.voter.add( layers.Dense( 1, activation=\"linear\",",
"[] for x in X: if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x])",
"with \" \"appropriate arguments before using this transformer.\" ) raise",
"{\"name\": type(self).__name__}) X = check_array(X) return self.voter.predict(X) def is_fitted(self): \"\"\"",
"self.multilabel = True return self.fit_multilabel(X, y) num_classes = len(np.unique(y)) self.uniform_posterior",
"/= sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter): def __init__(self, k, kwargs={}):",
"__init__(self, finite_sample_correction=False): \"\"\" Doc strings here. \"\"\" self.finite_sample_correction = finite_sample_correction",
"posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction: posteriors = self._finite_sample_correction(",
"= False self.k = k self.kwargs = kwargs def fit(self,",
"{\"name\": type(self).__name__}) votes_per_example = [] for x in X: if",
"Doc strings here. \"\"\" self.validation_split = validation_split self.loss = loss",
"= True return self def fit_multilabel(self, X, y): num_labels =",
"\"\"\" return self._is_fitted class NeuralRegressionVoter(BaseVoter): def __init__( self, validation_split=0.25, loss=\"mse\",",
"else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings here.",
"X = check_array(X) return self.voter.predict(X) def is_fitted(self): \"\"\" Doc strings",
"raise NotFittedError(msg % {\"name\": type(self).__name__}) votes_per_example = [] for x",
"= epochs self.lr = lr self.verbose = verbose self._is_fitted =",
"data \"\"\" correction_constant = 1 / (num_classes * num_points_in_partition) zero_posterior_idxs",
"import keras from keras import layers class TreeClassificationVoter(BaseVoter): def __init__(self,",
"'fit' with \" \"appropriate arguments before using this voter.\" )",
"= np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return self def vote(self, X):",
"strings here. \"\"\" self._is_fitted = False self.k = k self.kwargs",
"y_val)[0]) for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted =",
"= np.where(X == leaf_id)[0] class_counts = [ len(np.where(y[idxs_in_leaf] == y_val)[0])",
"fit(self, X, y): \"\"\" Doc strings here. \"\"\" check_classification_targets(y) if",
"= len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) / num_classes self.leaf_to_posterior = {}",
"NotFittedError(msg % {\"name\": type(self).__name__}) votes_per_example = [] for x in",
"\"\"\" Doc strings here. \"\"\" return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition,",
"verbose self._is_fitted = False def fit(self, X, y): \"\"\" Doc",
"len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return",
"self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) ) self.leaf_to_posterior[leaf_id] =",
"self._is_fitted = True return self def fit_multilabel(self, X, y): num_labels",
"y) self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted = True",
"# TODO: multilabel finite sample correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted",
"if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example) def",
"probabilities. self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf =",
") ) self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y,",
"this voter.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__}) votes_per_example =",
"self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return self def vote(self,",
"sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation import",
"__init__(self, k, kwargs={}): \"\"\" Doc strings here. \"\"\" self._is_fitted =",
"BaggingClassifier # from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier",
"activation=\"linear\", input_shape=(X.shape[1],), name=\"transform_to_vote\", ) ) self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) )",
"self.uniform_posterior = y.sum(axis=0) / len(y) # Each posterior is now",
"in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] # class_counts =",
"not self.is_fitted(): msg = ( \"This %(name)s instance is not",
"= False self.multilabel = False def fit(self, X, y): \"\"\"",
"idxs_in_leaf = np.where(X == leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf, j]",
"Doc strings here. \"\"\" return self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes):",
"Doc strings here. \"\"\" self._is_fitted = False self.k = k",
"k, kwargs={}): \"\"\" Doc strings here. \"\"\" self._is_fitted = False",
"np.where(X == leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf, j] == 1)[0])",
"= correction_constant posteriors /= sum(posteriors) return posteriors class KNNClassificationVoter(BaseVoter): def",
"\"appropriate arguments before using this transformer.\" ) raise NotFittedError(msg %",
"def is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted def",
"x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return np.array(votes_per_example) def is_fitted(self):",
"self.knn.predict_proba(X) def is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted",
"y): \"\"\" Doc strings here. \"\"\" X, y = check_X_y(X,",
") raise NotFittedError(msg % {\"name\": type(self).__name__}) X = check_array(X) return",
"verbose=False, ): \"\"\" Doc strings here. \"\"\" self.validation_split = validation_split",
"num_classes self.leaf_to_posterior = {} for leaf_id in np.unique(X): idxs_in_leaf =",
"for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] label_counts",
"finite sample correction. self.leaf_to_posterior[leaf_id] = posteriors self._is_fitted = True return",
"for leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] #",
"y): \"\"\" Doc strings here. \"\"\" self.leaf_to_yhat = {} self.global_yhat",
"np.array(votes_per_example) def is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted",
"# Each posterior is now a num_labels size vector or",
"\"\"\" Doc strings here. \"\"\" self._is_fitted = False def fit(self,",
"\"\"\" correction_constant = 1 / (num_classes * num_points_in_partition) zero_posterior_idxs =",
"leaf_id in np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] # class_counts",
"\"\"\" self.finite_sample_correction = finite_sample_correction self._is_fitted = False self.multilabel = False",
"leaf_id)[0] label_counts = [ len(np.where(y[idxs_in_leaf, j] == 1)[0]) for j",
"transformer.\" ) raise NotFittedError(msg % {\"name\": type(self).__name__}) X = check_array(X)",
"vote(self, X): \"\"\" Doc strings here. \"\"\" if not self.is_fitted():",
"KNeighborsClassifier from sklearn.utils.validation import ( check_X_y, check_array, NotFittedError, ) from",
"\"\"\" X, y = check_X_y(X, y) self.voter = keras.Sequential() self.voter.add(",
"X: if x in list(self.leaf_to_yhat.keys()): votes_per_example.append(self.leaf_to_yhat[x]) else: votes_per_example.append(self.global_yhat) return np.array(votes_per_example)",
"DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.utils.validation import ( check_X_y,",
"self.knn = KNeighborsClassifier(self.k, **self.kwargs) self.knn.fit(X, y) self._is_fitted = True return",
"= posteriors self._is_fitted = True return self def vote(self, X):",
"posteriors = np.nan_to_num(np.array(label_counts) / np.sum(label_counts)) # TODO: multilabel finite sample",
"self._is_fitted def _finite_sample_correction(posteriors, num_points_in_partition, num_classes): \"\"\" encourage posteriors to approach",
"len(np.unique(y)) self.uniform_posterior = np.ones(num_classes) / num_classes self.leaf_to_posterior = {} for",
"class_counts = [len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y)] self.leaf_to_yhat[leaf_id]",
"**self.kwargs) self.knn.fit(X, y) self._is_fitted = True return self def vote(self,",
"np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return self def vote(self, X): \"\"\"",
"from sklearn.utils.multiclass import check_classification_targets, type_of_target from .base import BaseVoter from",
"check_classification_targets(y) if type_of_target(y) == 'multilabel-indicator': # Fit multilabel binary task.",
"return self def fit_multilabel(self, X, y): num_labels = y.shape[1] self.uniform_posterior",
"\"\"\" Doc strings here. \"\"\" self._is_fitted = False self.k =",
"loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")],",
"import numpy as np # from sklearn.ensemble import BaggingClassifier #",
"optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split,",
"num_labels = y.shape[1] self.uniform_posterior = y.sum(axis=0) / len(y) # Each",
"is_fitted(self): \"\"\" Doc strings here. \"\"\" return self._is_fitted class NeuralRegressionVoter(BaseVoter):",
"] posteriors = np.nan_to_num(np.array(class_counts) / np.sum(class_counts)) if self.finite_sample_correction: posteriors =",
"sklearn.ensemble import BaggingClassifier # from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors",
"def __init__(self, finite_sample_correction=False): \"\"\" Doc strings here. \"\"\" self.finite_sample_correction =",
"np.sum(class_counts)) if self.finite_sample_correction: posteriors = self._finite_sample_correction( posteriors, len(idxs_in_leaf), len(np.unique(y)) )",
"return self._is_fitted class TreeRegressionVoter(BaseVoter): def __init__(self): \"\"\" Doc strings here.",
"\"\"\" self.leaf_to_yhat = {} self.global_yhat = np.mean(y) for leaf_id in",
"np.unique(X): idxs_in_leaf = np.where(X == leaf_id)[0] # class_counts = [len(np.where(y[idxs_in_leaf]",
"= [ len(np.where(y[idxs_in_leaf] == y_val)[0]) for y_val in np.unique(y) ]",
"approach uniform when there is low data \"\"\" correction_constant =",
"numpy as np # from sklearn.ensemble import BaggingClassifier # from",
"validation_split=self.validation_split, shuffle=True, ) self._is_fitted = True return self def vote(self,",
"check_array, NotFittedError, ) from sklearn.utils.multiclass import check_classification_targets, type_of_target from .base",
"callbacks=[keras.callbacks.EarlyStopping(patience=20, monitor=\"val_loss\")], verbose=self.verbose, validation_split=self.validation_split, shuffle=True, ) self._is_fitted = True return",
"def __init__(self): \"\"\" Doc strings here. \"\"\" self._is_fitted = False",
"as np # from sklearn.ensemble import BaggingClassifier # from sklearn.tree",
"num_points_in_partition) zero_posterior_idxs = np.where(posteriors == 0)[0] posteriors[zero_posterior_idxs] = correction_constant posteriors",
"np.unique(y)] self.leaf_to_yhat[leaf_id] = np.nan_to_num(np.mean(y[idxs_in_leaf])) self._is_fitted = True return self def",
"True return self def fit_multilabel(self, X, y): num_labels = y.shape[1]",
"posterior is now a num_labels size vector or binary probabilities.",
"self.voter.compile( loss=self.loss, metrics=[\"mae\"], optimizer=keras.optimizers.Adam(self.lr) ) self.voter.fit( X, y, epochs=self.epochs, callbacks=[keras.callbacks.EarlyStopping(patience=20,",
"in X: if x in list(self.leaf_to_posterior.keys()): votes_per_example.append(self.leaf_to_posterior[x]) else: votes_per_example.append(self.uniform_posterior) return",
"type_of_target from .base import BaseVoter from tensorflow import keras from"
] |
[
"save high res pics from camera. # If also uploading,",
"change behavior of photo booth monitor_w = 800 # width",
"first capture low res pics. False is faster. # Careful,",
"If false, do not. False is faster. make_gifs = True",
"name without .tumblr.com tagsForTumblr = \"photobooth\" # change to tags",
"tagsForTumblr = \"photobooth\" # change to tags you want, separated",
"values with your information # OAuth keys can be generated",
"button. Add more time if the button triggers too many",
"store locally only. capture_count_pics = True # if true, show",
"False # True to save high res pics from camera.",
"#replace with your oath secret code tumblr_blog = 'soniaetjeremie' #",
"hi_res_pics = False # True to save high res pics",
"issues. Normal is 100 or 200. Sort of dark is",
"the program will also convert each image to a smaller",
"replace with your tumblr account name without .tumblr.com tagsForTumblr =",
"= False # True will clear previously stored photos as",
"# height of the display monitor file_path = '/home/pi/photobooth/pics/' #",
"previously stored photos as the program launches. False will leave",
"True will clear previously stored photos as the program launches.",
"program will also convert each image to a smaller image",
"Tumblr upload max. camera_iso = 400 # adjust for lighting",
"True to make an animated gif. False to post 4",
"tumblr account name without .tumblr.com tagsForTumblr = \"photobooth\" # change",
"previous photos. debounce = 0.3 # how long to debounce",
"monitor_h = 480 # height of the display monitor file_path",
"max. camera_iso = 400 # adjust for lighting issues. Normal",
"for lighting issues. Normal is 100 or 200. Sort of",
"False # True will clear previously stored photos as the",
"post. hi_res_pics = False # True to save high res",
"behavior of photo booth monitor_w = 800 # width of",
"code oath_token='<KEY>' #replace with your oath token oath_secret='<KEY>' #replace with",
"photos. If false, do not. False is faster. make_gifs =",
"as the program launches. False will leave all previous photos.",
"only. capture_count_pics = True # if true, show a photo",
"with your secret code oath_token='<KEY>' #replace with your oath token",
"with your oath secret code tumblr_blog = 'soniaetjeremie' # replace",
"= 'soniaetjeremie' # replace with your tumblr account name without",
"a smaller image before making the gif. # False to",
"between taking photos. If false, do not. False is faster.",
"to save high res pics from camera. # If also",
"want, separated with commas #Config settings to change behavior of",
"from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with your key consumer_secret='<KEY>' #replace with",
"count between taking photos. If false, do not. False is",
"# Tumblr Setup # Replace the values with your information",
"4 jpgs into one post. hi_res_pics = False # True",
"height of the display monitor file_path = '/home/pi/photobooth/pics/' # path",
"will leave all previous photos. debounce = 0.3 # how",
"of photo booth monitor_w = 800 # width of the",
"stored photos as the program launches. False will leave all",
"make_gifs = True # True to make an animated gif.",
"# OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace",
"key consumer_secret='<KEY>' #replace with your secret code oath_token='<KEY>' #replace with",
"of the display monitor file_path = '/home/pi/photobooth/pics/' # path to",
"oath_token='<KEY>' #replace with your oath token oath_secret='<KEY>' #replace with your",
"= 800 # width of the display monitor monitor_h =",
"debounce the button. Add more time if the button triggers",
"to tags you want, separated with commas #Config settings to",
"100 or 200. Sort of dark is 400. Dark is",
"# replace with your tumblr account name without .tumblr.com tagsForTumblr",
"= True # True to upload images. False to store",
"'/home/pi/photobooth/pics/' # path to save images clear_on_startup = False #",
"also uploading, the program will also convert each image to",
"If also uploading, the program will also convert each image",
"is faster. make_gifs = True # True to make an",
"= True # True to make an animated gif. False",
"res pics from camera. # If also uploading, the program",
"make an animated gif. False to post 4 jpgs into",
"post 4 jpgs into one post. hi_res_pics = False #",
"# change to tags you want, separated with commas #Config",
"Careful, each photo costs against your daily Tumblr upload max.",
"image before making the gif. # False to first capture",
"upload max. camera_iso = 400 # adjust for lighting issues.",
"#replace with your secret code oath_token='<KEY>' #replace with your oath",
"# Careful, each photo costs against your daily Tumblr upload",
"can be generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with your key",
"an animated gif. False to post 4 jpgs into one",
"high res pics from camera. # If also uploading, the",
"to post 4 jpgs into one post. hi_res_pics = False",
"gif. # False to first capture low res pics. False",
"times. post_online = True # True to upload images. False",
"your daily Tumblr upload max. camera_iso = 400 # adjust",
"with your oath token oath_secret='<KEY>' #replace with your oath secret",
"# adjust for lighting issues. Normal is 100 or 200.",
"= 480 # height of the display monitor file_path =",
"Normal is 100 or 200. Sort of dark is 400.",
"capture_count_pics = True # if true, show a photo count",
"lighting issues. Normal is 100 or 200. Sort of dark",
"images. False to store locally only. capture_count_pics = True #",
"images clear_on_startup = False # True will clear previously stored",
"to debounce the button. Add more time if the button",
"a photo count between taking photos. If false, do not.",
"faster. make_gifs = True # True to make an animated",
"one post. hi_res_pics = False # True to save high",
"from camera. # If also uploading, the program will also",
"image to a smaller image before making the gif. #",
"faster. # Careful, each photo costs against your daily Tumblr",
"https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with your key consumer_secret='<KEY>' #replace with your",
"making the gif. # False to first capture low res",
"taking photos. If false, do not. False is faster. make_gifs",
"to save images clear_on_startup = False # True will clear",
"show a photo count between taking photos. If false, do",
"time if the button triggers too many times. post_online =",
"the display monitor monitor_h = 480 # height of the",
"= False # True to save high res pics from",
"your key consumer_secret='<KEY>' #replace with your secret code oath_token='<KEY>' #replace",
"daily Tumblr upload max. camera_iso = 400 # adjust for",
"uploading, the program will also convert each image to a",
"without .tumblr.com tagsForTumblr = \"photobooth\" # change to tags you",
"debounce = 0.3 # how long to debounce the button.",
"OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with",
"with your information # OAuth keys can be generated from",
"dark is 400. Dark is 800 max. # available options:",
"= 400 # adjust for lighting issues. Normal is 100",
"many times. post_online = True # True to upload images.",
"# how long to debounce the button. Add more time",
"Add more time if the button triggers too many times.",
"False will leave all previous photos. debounce = 0.3 #",
"monitor file_path = '/home/pi/photobooth/pics/' # path to save images clear_on_startup",
"clear_on_startup = False # True will clear previously stored photos",
"path to save images clear_on_startup = False # True will",
"<filename>config.py # Tumblr Setup # Replace the values with your",
"# If also uploading, the program will also convert each",
"be generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with your key consumer_secret='<KEY>'",
"to change behavior of photo booth monitor_w = 800 #",
"booth monitor_w = 800 # width of the display monitor",
"0.3 # how long to debounce the button. Add more",
"costs against your daily Tumblr upload max. camera_iso = 400",
"against your daily Tumblr upload max. camera_iso = 400 #",
".tumblr.com tagsForTumblr = \"photobooth\" # change to tags you want,",
"account name without .tumblr.com tagsForTumblr = \"photobooth\" # change to",
"Replace the values with your information # OAuth keys can",
"photo count between taking photos. If false, do not. False",
"all previous photos. debounce = 0.3 # how long to",
"post_online = True # True to upload images. False to",
"the values with your information # OAuth keys can be",
"True # True to upload images. False to store locally",
"to a smaller image before making the gif. # False",
"adjust for lighting issues. Normal is 100 or 200. Sort",
"True to upload images. False to store locally only. capture_count_pics",
"keys can be generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with your",
"to store locally only. capture_count_pics = True # if true,",
"is 100 or 200. Sort of dark is 400. Dark",
"= '/home/pi/photobooth/pics/' # path to save images clear_on_startup = False",
"False to post 4 jpgs into one post. hi_res_pics =",
"# if true, show a photo count between taking photos.",
"will also convert each image to a smaller image before",
"clear previously stored photos as the program launches. False will",
"upload images. False to store locally only. capture_count_pics = True",
"token oath_secret='<KEY>' #replace with your oath secret code tumblr_blog =",
"the button triggers too many times. post_online = True #",
"400 # adjust for lighting issues. Normal is 100 or",
"the gif. # False to first capture low res pics.",
"res pics. False is faster. # Careful, each photo costs",
"800 max. # available options: 100, 200, 320, 400, 500,",
"# width of the display monitor monitor_h = 480 #",
"is 400. Dark is 800 max. # available options: 100,",
"oath token oath_secret='<KEY>' #replace with your oath secret code tumblr_blog",
"#Config settings to change behavior of photo booth monitor_w =",
"monitor_w = 800 # width of the display monitor monitor_h",
"true, show a photo count between taking photos. If false,",
"smaller image before making the gif. # False to first",
"oath secret code tumblr_blog = 'soniaetjeremie' # replace with your",
"file_path = '/home/pi/photobooth/pics/' # path to save images clear_on_startup =",
"not. False is faster. make_gifs = True # True to",
"animated gif. False to post 4 jpgs into one post.",
"True # if true, show a photo count between taking",
"camera. # If also uploading, the program will also convert",
"= \"photobooth\" # change to tags you want, separated with",
"photo booth monitor_w = 800 # width of the display",
"photos. debounce = 0.3 # how long to debounce the",
"# True to upload images. False to store locally only.",
"400. Dark is 800 max. # available options: 100, 200,",
"too many times. post_online = True # True to upload",
"do not. False is faster. make_gifs = True # True",
"= True # if true, show a photo count between",
"photo costs against your daily Tumblr upload max. camera_iso =",
"False to first capture low res pics. False is faster.",
"will clear previously stored photos as the program launches. False",
"# path to save images clear_on_startup = False # True",
"to make an animated gif. False to post 4 jpgs",
"tags you want, separated with commas #Config settings to change",
"capture low res pics. False is faster. # Careful, each",
"pics. False is faster. # Careful, each photo costs against",
"locally only. capture_count_pics = True # if true, show a",
"save images clear_on_startup = False # True will clear previously",
"the display monitor file_path = '/home/pi/photobooth/pics/' # path to save",
"with your tumblr account name without .tumblr.com tagsForTumblr = \"photobooth\"",
"convert each image to a smaller image before making the",
"# False to first capture low res pics. False is",
"false, do not. False is faster. make_gifs = True #",
"your secret code oath_token='<KEY>' #replace with your oath token oath_secret='<KEY>'",
"gif. False to post 4 jpgs into one post. hi_res_pics",
"display monitor monitor_h = 480 # height of the display",
"480 # height of the display monitor file_path = '/home/pi/photobooth/pics/'",
"display monitor file_path = '/home/pi/photobooth/pics/' # path to save images",
"each photo costs against your daily Tumblr upload max. camera_iso",
"Tumblr Setup # Replace the values with your information #",
"your oath token oath_secret='<KEY>' #replace with your oath secret code",
"program launches. False will leave all previous photos. debounce =",
"= 0.3 # how long to debounce the button. Add",
"more time if the button triggers too many times. post_online",
"False to store locally only. capture_count_pics = True # if",
"camera_iso = 400 # adjust for lighting issues. Normal is",
"of dark is 400. Dark is 800 max. # available",
"generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>' #replace with your key consumer_secret='<KEY>' #replace",
"#replace with your oath token oath_secret='<KEY>' #replace with your oath",
"your tumblr account name without .tumblr.com tagsForTumblr = \"photobooth\" #",
"Setup # Replace the values with your information # OAuth",
"# True will clear previously stored photos as the program",
"launches. False will leave all previous photos. debounce = 0.3",
"also convert each image to a smaller image before making",
"if true, show a photo count between taking photos. If",
"change to tags you want, separated with commas #Config settings",
"pics from camera. # If also uploading, the program will",
"#replace with your key consumer_secret='<KEY>' #replace with your secret code",
"before making the gif. # False to first capture low",
"True # True to make an animated gif. False to",
"settings to change behavior of photo booth monitor_w = 800",
"tumblr_blog = 'soniaetjeremie' # replace with your tumblr account name",
"monitor monitor_h = 480 # height of the display monitor",
"consumer_secret='<KEY>' #replace with your secret code oath_token='<KEY>' #replace with your",
"Sort of dark is 400. Dark is 800 max. #",
"the program launches. False will leave all previous photos. debounce",
"into one post. hi_res_pics = False # True to save",
"with your key consumer_secret='<KEY>' #replace with your secret code oath_token='<KEY>'",
"each image to a smaller image before making the gif.",
"commas #Config settings to change behavior of photo booth monitor_w",
"# True to make an animated gif. False to post",
"False is faster. # Careful, each photo costs against your",
"\"photobooth\" # change to tags you want, separated with commas",
"button triggers too many times. post_online = True # True",
"secret code oath_token='<KEY>' #replace with your oath token oath_secret='<KEY>' #replace",
"secret code tumblr_blog = 'soniaetjeremie' # replace with your tumblr",
"is faster. # Careful, each photo costs against your daily",
"the button. Add more time if the button triggers too",
"triggers too many times. post_online = True # True to",
"Dark is 800 max. # available options: 100, 200, 320,",
"code tumblr_blog = 'soniaetjeremie' # replace with your tumblr account",
"800 # width of the display monitor monitor_h = 480",
"leave all previous photos. debounce = 0.3 # how long",
"# available options: 100, 200, 320, 400, 500, 640, 800",
"separated with commas #Config settings to change behavior of photo",
"how long to debounce the button. Add more time if",
"# Replace the values with your information # OAuth keys",
"photos as the program launches. False will leave all previous",
"or 200. Sort of dark is 400. Dark is 800",
"# True to save high res pics from camera. #",
"low res pics. False is faster. # Careful, each photo",
"True to save high res pics from camera. # If",
"your oath secret code tumblr_blog = 'soniaetjeremie' # replace with",
"200. Sort of dark is 400. Dark is 800 max.",
"information # OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info consumer_key='<KEY>'",
"if the button triggers too many times. post_online = True",
"is 800 max. # available options: 100, 200, 320, 400,",
"with commas #Config settings to change behavior of photo booth",
"False is faster. make_gifs = True # True to make",
"'soniaetjeremie' # replace with your tumblr account name without .tumblr.com",
"oath_secret='<KEY>' #replace with your oath secret code tumblr_blog = 'soniaetjeremie'",
"max. # available options: 100, 200, 320, 400, 500, 640,",
"of the display monitor monitor_h = 480 # height of",
"width of the display monitor monitor_h = 480 # height",
"long to debounce the button. Add more time if the",
"to first capture low res pics. False is faster. #",
"you want, separated with commas #Config settings to change behavior",
"jpgs into one post. hi_res_pics = False # True to",
"to upload images. False to store locally only. capture_count_pics =",
"consumer_key='<KEY>' #replace with your key consumer_secret='<KEY>' #replace with your secret",
"your information # OAuth keys can be generated from https://api.tumblr.com/console/calls/user/info"
] |
[
"accounts.models import EmailUser from shared.admin import ExportCsvMixin # no need",
"\"export_utilisateurs.csv\" list_display = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter",
"EmailUser from shared.admin import ExportCsvMixin # no need for groups",
"Group from accounts.models import EmailUser from shared.admin import ExportCsvMixin #",
"and superusers admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage des",
"(\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",)",
"= (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\",",
"\"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",), ) ordering = (\"last_name\", \"first_name\")",
"django.contrib import admin from django.contrib.auth.models import Group from accounts.models import",
"\"first_name\", \"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",), ) ordering =",
"need for groups - we only have regular users and",
"for groups - we only have regular users and superusers",
"= \"export_utilisateurs.csv\" list_display = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",)",
"admin from django.contrib.auth.models import Group from accounts.models import EmailUser from",
"EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage des activités dans la vue django",
"from shared.admin import ExportCsvMixin # no need for groups -",
"- we only have regular users and superusers admin.site.unregister(Group) @admin.register(EmailUser)",
"superusers admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage des activités",
"la vue django admin\"\"\" filename = \"export_utilisateurs.csv\" list_display = (\"email\",",
"\"last_name\", \"first_name\", \"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",), ) ordering",
"= (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_staff\",",
"= (\"last_name\", \"first_name\") readonly_fields = (\"date_joined\", \"last_login\",) list_per_page = 200",
"\"first_name\") readonly_fields = (\"date_joined\", \"last_login\",) list_per_page = 200 csv_export_exclude =",
"import EmailUser from shared.admin import ExportCsvMixin # no need for",
"\"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields",
"dans la vue django admin\"\"\" filename = \"export_utilisateurs.csv\" list_display =",
"shared.admin import ExportCsvMixin # no need for groups - we",
"we only have regular users and superusers admin.site.unregister(Group) @admin.register(EmailUser) class",
"\"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields = (\"email\", \"last_name\", \"first_name\",",
"\"is_active\", \"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields = (\"email\", \"last_name\",",
"users and superusers admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage",
"import admin from django.contrib.auth.models import Group from accounts.models import EmailUser",
"(\"is_superuser\",\"is_active\", \"email_confirmed\",) fields = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_staff\", \"is_active\",",
") ordering = (\"last_name\", \"first_name\") readonly_fields = (\"date_joined\", \"last_login\",) list_per_page",
"readonly_fields = (\"date_joined\", \"last_login\",) list_per_page = 200 csv_export_exclude = [\"password\"]",
"\"email_confirmed\",) fields = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\",",
"# no need for groups - we only have regular",
"class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage des activités dans la vue",
"@admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage des activités dans la",
"d'affichage des activités dans la vue django admin\"\"\" filename =",
"\"email_confirmed\", (\"date_joined\", \"last_login\",), ) ordering = (\"last_name\", \"first_name\") readonly_fields =",
"activités dans la vue django admin\"\"\" filename = \"export_utilisateurs.csv\" list_display",
"regular users and superusers admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option",
"fields = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\",",
"list_display = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter =",
"have regular users and superusers admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):",
"admin.ModelAdmin): \"\"\"option d'affichage des activités dans la vue django admin\"\"\"",
"import ExportCsvMixin # no need for groups - we only",
"\"first_name\", \"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields =",
"from django.contrib.auth.models import Group from accounts.models import EmailUser from shared.admin",
"\"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",), ) ordering = (\"last_name\",",
"des activités dans la vue django admin\"\"\" filename = \"export_utilisateurs.csv\"",
"\"is_superuser\", \"is_active\", \"email_confirmed\",) list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields = (\"email\",",
"admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin): \"\"\"option d'affichage des activités dans",
"from accounts.models import EmailUser from shared.admin import ExportCsvMixin # no",
"vue django admin\"\"\" filename = \"export_utilisateurs.csv\" list_display = (\"email\", \"last_name\",",
"admin\"\"\" filename = \"export_utilisateurs.csv\" list_display = (\"email\", \"last_name\", \"first_name\", \"is_superuser\",",
"(\"date_joined\", \"last_login\",), ) ordering = (\"last_name\", \"first_name\") readonly_fields = (\"date_joined\",",
"django admin\"\"\" filename = \"export_utilisateurs.csv\" list_display = (\"email\", \"last_name\", \"first_name\",",
"\"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",), ) ordering = (\"last_name\", \"first_name\") readonly_fields",
"filename = \"export_utilisateurs.csv\" list_display = (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_active\",",
"import Group from accounts.models import EmailUser from shared.admin import ExportCsvMixin",
"only have regular users and superusers admin.site.unregister(Group) @admin.register(EmailUser) class EmailUserAdmin(ExportCsvMixin,",
"no need for groups - we only have regular users",
"= (\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",),",
"\"\"\"option d'affichage des activités dans la vue django admin\"\"\" filename",
"ExportCsvMixin # no need for groups - we only have",
"(\"email\", \"last_name\", \"first_name\", \"is_superuser\", \"is_staff\", \"is_active\", \"email_confirmed\", (\"date_joined\", \"last_login\",), )",
"groups - we only have regular users and superusers admin.site.unregister(Group)",
"\"last_login\",), ) ordering = (\"last_name\", \"first_name\") readonly_fields = (\"date_joined\", \"last_login\",)",
"(\"last_name\", \"first_name\") readonly_fields = (\"date_joined\", \"last_login\",) list_per_page = 200 csv_export_exclude",
"ordering = (\"last_name\", \"first_name\") readonly_fields = (\"date_joined\", \"last_login\",) list_per_page =",
"django.contrib.auth.models import Group from accounts.models import EmailUser from shared.admin import",
"list_filter = (\"is_superuser\",\"is_active\", \"email_confirmed\",) fields = (\"email\", \"last_name\", \"first_name\", \"is_superuser\",",
"from django.contrib import admin from django.contrib.auth.models import Group from accounts.models"
] |
[
"def validate_api_key(self) -> Tuple[bool, str]: \"\"\"Validates that the Coinbase API",
"import ( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair,",
"from rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price,",
"'sells' in method_str: permission = 'wallet:sells:read' elif 'deposits' in method_str:",
"not in json_ret: raise RemoteError(f'Coinbase json response does not contain",
"there # is no argument in the API call if",
"pass class Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey, secret: ApiSecret,",
"get the account ids and for each one query buys/sells",
"as e: raise RemoteError(f'Coinbase API request failed due to {str(e)}')",
"self.msg_aggregator.add_error( 'Error processing a coinbase account balance. Check logs '",
"range here since there # is no argument in the",
"from coinbase and deserializes it Can log error/warning and return",
"be a \"send\" which is the way Coinbase uses to",
"except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with",
"log error/warning and return None if something went wrong at",
"Skipping balance entry', ) continue if asset in returned_balances: amount",
"asset ' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as",
"import logging import time from json.decoder import JSONDecodeError from typing",
"the subsequent queries if 'pagination' in json_ret and not pagination_next_uri",
"Timestamp, ) -> List[AssetMovement]: account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data)",
"for usage in Rotki Makes sure that the following permissions",
"which in Coinbase is the # way to send Crypto",
"deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement(",
"wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return None, msg except RemoteError",
"raw_data.get('payout_at', None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else:",
"_api_query( self, endpoint: str, options: Optional[Dict[str, Any]] = None, pagination_next_uri:",
"# Looking at coinbase's API no other type of transaction",
"except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown",
"account_ids.append(account_data['id']) return account_ids def _api_query( self, endpoint: str, options: Optional[Dict[str,",
"= raw_network.get('transaction_fee', None) if raw_fee: # Since this is a",
"coinbase you are buying/selling tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}')",
"signature' in error: return None, 'Failed to authenticate with the",
"we get an empty next_uri we are done return final_data",
"being charged from the \"send\" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset",
"= {} returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount'] * usd_price",
"elif 'accounts' in method_str: permission = 'wallet:accounts:read' else: raise AssertionError(",
"Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount,",
"not reach coinbase due ' 'to {}'.format(e) ) log.error(msg) return",
"self._validate_single_api_key_action('accounts') if result is None: return False, msg # now",
"with error status code: ' f'{response.status_code} and text: {response.text}', )",
"want to deal with completed trades return None if raw_trade['instant']:",
"raise RemoteError('Coinbase json response contained no \"next_uri\" key') next_uri =",
"method = f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if result is",
"end since the word appears # in other endpoints elif",
"address from coinbase there is the network fee in the",
"self.msg_aggregator.add_error( 'Error processing a coinbase trade. Check logs ' 'for",
"msg # now get the account ids account_ids = self._get_account_ids(result)",
"trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]: \"\"\"Processes a",
"get address/transaction id for \"send\" type of transactions address =",
") continue return returned_balances, '' def query_online_trade_history( self, start_ts: Timestamp,",
"raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource'])",
"coinbase deposit/withdrawal with unknown asset ' f'{e.asset_name}. Ignoring it.', )",
") log.error( 'Error processing a coinbase account balance', account_balance=account, error=msg,",
"permission = 'wallet:trades:read' # the accounts elif should be at",
"'id' not in account_data: self.msg_aggregator.add_error( 'Found coinbase account entry without",
"= deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. Coinbase returns zero balances",
"transactions address = None transaction_id = None # movement_category: Union[Literal['deposit'],",
"returned_balances: Dict[Asset, Dict[str, Any]] = {} for account in resp:",
"in json_ret: raise RemoteError(f'Coinbase json response does not contain data:",
"balance entry due to inability to ' f'query USD price:",
"Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp)",
"also get transactions to get the \"sends\", which in Coinbase",
"address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount",
"and trade.timestamp >= start_ts and trade.timestamp <= end_ts: trades.append(trade) return",
"= 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def first_connection(self) -> None: self.first_connection_made",
"{} for account in resp: try: if not account['balance']: continue",
"account to see if that's possible method = f'accounts/{account_ids[0]}/sells' result,",
"then set ignore_pagination to True. \"\"\" request_verb = \"GET\" if",
"but from an experiment of sending ETH # to an",
"raw_network = raw_data.get('network', None) if raw_network: raw_fee = raw_network.get('transaction_fee', None)",
"price: {str(e)}. Skipping balance entry', ) continue if asset in",
"movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset",
"in method_str: permission = 'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase",
"not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account entry with a",
"request failed due to {str(e)}') if response.status_code == 403: raise",
"with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) except (DeserializationError,",
"a \"send\" which is the way Coinbase uses to send",
") return None def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp,",
"deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter",
"self.msg_aggregator.add_warning( f'Found coinbase balance result with unsupported asset ' f'{e.asset_name}.",
") else: fee = deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id",
"api key' in error: return None, 'Provided API Key is",
"rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def",
"403: raise CoinbasePermissionError(f'API key does not have permission for {endpoint}')",
") continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance",
"None) if raw_network: raw_fee = raw_network.get('transaction_fee', None) if raw_fee: #",
"If the coinbase transaction is not a trade related transaction",
"failed. Could not reach coinbase due ' 'to {}'.format(e) )",
"f'query USD price: {str(e)}. Skipping balance entry', ) continue if",
"\"send\" type of transactions address = None transaction_id = None",
"'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase json response contained no",
"transaction returns None Throws: - UnknownAsset due to Asset instantiation",
"possible method = f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if result",
"{asset.identifier} the fee' f'is denoted in {raw_fee[\"currency\"]}', ) else: fee",
"open a bug report.', ) log.error( f'Unexpected data encountered during",
"e: error = str(e) if 'invalid signature' in error: return",
"pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The rate is",
"result, msg = self._validate_single_api_key_action(method) if result is None: return False,",
"return None, 'Failed to authenticate with the Provided API key/secret'",
"except RemoteError as e: msg = ( 'Coinbase API request",
"a coinbase account balance', account_balance=account, error=msg, ) continue return returned_balances,",
"CoinbasePermissionError(f'API key does not have permission for {endpoint}') if response.status_code",
"\"send\" type found in coinbase deposit/withdrawal processing' assert raw_data['type'] ==",
"the network fee in the response fee = Fee(ZERO) raw_network",
"completed trades return None if raw_trade['instant']: raw_time = raw_trade['created_at'] else:",
"False, msg # and now try to get all withdrawals",
"continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. Coinbase returns",
"good for usage in Rotki Makes sure that the following",
"raw_network.get('transaction_fee', None) if raw_fee: # Since this is a withdrawal",
"the coinbase transaction is not a trade related transaction returns",
"' f'asset_movement {raw_data}. Error was: {str(e)}', ) return None def",
"the word appears # in other endpoints elif 'accounts' in",
"result, '' def validate_api_key(self) -> Tuple[bool, str]: \"\"\"Validates that the",
"argument in the API call if movement and movement.timestamp >=",
"response does not contain data: {response.text}') final_data = json_ret['data'] #",
"'withdrawals' in method_str: permission = 'wallet:withdrawals:read' elif 'trades' in method_str:",
"with a non string id: ' f'{account_data[\"id\"]}. Skipping it. ',",
"a coinbase trade', trade=raw_trade, error=msg, ) continue # limit coinbase",
"the response fee = Fee(ZERO) raw_network = raw_data.get('network', None) if",
"raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}') if 'data' not",
"msg_aggregator def first_connection(self) -> None: self.first_connection_made = True def _validate_single_api_key_action(",
"rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, )",
"start_ts: Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]: account_data = self._api_query('accounts')",
"key does not have permission for {endpoint}') if response.status_code !=",
"= self._api_query('accounts') except RemoteError as e: msg = ( 'Coinbase",
"start_ts: Timestamp, end_ts: Timestamp, ) -> List[Trade]: account_data = self._api_query('accounts')",
"results then set ignore_pagination to True. \"\"\" request_verb = \"GET\"",
"exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non \"send\" type found in",
"msg = self._validate_single_api_key_action(method) if result is None: return False, msg",
"elif 'trades' in method_str: permission = 'wallet:trades:read' # the accounts",
"Ignoring it.', ) log.error( 'Error processing a coinbase trade', trade=raw_trade,",
"timestamp + request_verb + request_url signature = hmac.new( self.secret, message.encode(),",
"and return None if something went wrong at deserialization \"\"\"",
"ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair, ) from rotkehlchen.user_messages",
"amount else: returned_balances[asset] = {} returned_balances[asset]['amount'] = amount usd_value =",
"fee = deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'],",
"for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase trade. Check logs",
"the Coinbase API key is good for usage in Rotki",
"account_data: self.msg_aggregator.add_error( 'Found coinbase account entry without an id key.",
") from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock",
"= self.base_uri + request_url try: response = self.session.get(full_url) except requests.exceptions.RequestException",
"== 403: raise CoinbasePermissionError(f'API key does not have permission for",
"for endpoint You can optionally provide extra arguments to the",
"except (DeserializationError, KeyError) as e: msg = str(e) if isinstance(e,",
"it.', ) except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal",
"-> List[AssetMovement]: account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data =",
"wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return",
"return True, '' def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]:",
"msg # and now try to get all buys of",
"coinbase balance entry due to inability to ' f'query USD",
"UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown asset",
"for each one query buys/sells # Looking at coinbase's API",
"\"next_uri\" key') next_uri = json_ret['pagination']['next_uri'] if not next_uri: # As",
"deposit/withdrawal processing' assert raw_data['type'] == 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL",
"that's possible method = f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if",
"coinbase API Query for endpoint You can optionally provide extra",
"'invalid signature' in error: return None, 'Failed to authenticate with",
"provide extra arguments to the endpoint via the options argument.",
"sure that the following permissions are given to the key:",
"coinbase's API no other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions #",
"return None, msg except RemoteError as e: error = str(e)",
"get all withdrawals of an account to see if that's",
"first query, gather all the subsequent queries if 'pagination' in",
"the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\" result,",
"balances. Coinbase returns zero balances for everything # a user",
"for {endpoint}') if response.status_code != 200: raise RemoteError( f'Coinbase query",
"{permission} permission activated. ' f'Please log into your coinbase account",
"coinbase there is the network fee in the response fee",
"fee = Fee(ZERO) raw_network = raw_data.get('network', None) if raw_network: raw_fee",
"False, msg # and now try to get all deposits",
"as e: msg = ( 'Coinbase API request failed. Could",
"in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data:",
"transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset",
"+ amount else: returned_balances[asset] = {} returned_balances[asset]['amount'] = amount usd_value",
"possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if result",
"movement. Check logs for details and open a bug report.',",
"__init__( self, api_key: ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator,",
"in method_str: permission = 'wallet:sells:read' elif 'deposits' in method_str: permission",
"deserialization of coinbase ' f'asset_movement {raw_data}. Error was: {str(e)}', )",
"coinbase deposit/withdrawal with unsupported asset ' f'{e.asset_name}. Ignoring it.', )",
"asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error(",
"if pagination_next_uri: request_url = pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if",
"in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades",
"returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase",
"= self._validate_single_api_key_action(method) if result is None: return False, msg #",
"an id key. Skipping it. ', ) continue if not",
"coinbase method {method_str} at API key validation', ) msg =",
"trades = [] for raw_trade in raw_data: try: trade =",
"to get the \"sends\", which in Coinbase is the #",
"try: response = self.session.get(full_url) except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase",
"key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\" result, msg",
"None payout_date = raw_data.get('payout_at', None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date,",
"get transactions to get the \"sends\", which in Coinbase is",
"is an ongoing paginating call then provide pagination_next_uri. If you",
"if 'data' not in json_ret: raise RemoteError(f'Coinbase json response does",
"if you buy/sell 1 unit of base currency rate =",
"user does not own if amount == ZERO: continue asset",
"in the API call if trade and trade.timestamp >= start_ts",
"{} returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value']",
"contained no \"next_uri\" key') next_uri = json_ret['pagination']['next_uri'] if not next_uri:",
"for tx in txs: if 'type' not in tx: continue",
"= False, ) -> Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str,",
"UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unknown",
"for account in resp: try: if not account['balance']: continue amount",
"import Asset from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import ZERO",
"Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey, secret: ApiSecret, database: 'DBHandler',",
"self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def first_connection(self) -> None:",
"as e: error = str(e) if 'transactions' in method_str: permission",
"RemoteError(f'Coinbase returned invalid JSON response: {response.text}') if 'data' not in",
"'Error processing a coinbase trade. Check logs ' 'for details.",
"# to an address from coinbase there is the network",
") final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset,",
"unit of base currency rate = Price(native_amount / tx_amount) fee_amount",
"balances for everything # a user does not own if",
"from json.decoder import JSONDecodeError from typing import TYPE_CHECKING, Any, Dict,",
"return trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]: \"\"\"Processes",
"RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: \"\"\"Turns a coinbase",
"', ) continue account_ids.append(account_data['id']) return account_ids def _api_query( self, endpoint:",
"self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in",
"account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to get the",
"error: return None, 'Failed to authenticate with the Provided API",
"to ' f'query USD price: {str(e)}. Skipping balance entry', )",
"if not next_uri: # As per the docs: https://developers.coinbase.com/api/v2?python#pagination #",
"permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read,",
"!= 'completed': return None payout_date = raw_data.get('payout_at', None) if payout_date:",
"= True def _validate_single_api_key_action( self, method_str: str, ignore_pagination: bool =",
"[] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history",
"== 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't see the",
"= msg_aggregator def first_connection(self) -> None: self.first_connection_made = True def",
"try to get all transactions of an account to see",
"account ids and for each one query buys/sells # Looking",
"RemoteError as e: msg = ( 'Coinbase API request failed.",
"with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue except",
"ignore_pagination: bool = False, ) -> Tuple[Optional[List[Any]], str]: try: result",
"f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options) timestamp = str(int(time.time())) message",
"balance result with unknown asset ' f'{e.asset_name}. Ignoring it.', )",
"if 'transactions' in method_str: permission = 'wallet:transactions:read' elif 'buys' in",
"self._validate_single_api_key_action(method) if result is None: return False, msg # and",
"We only want to deal with completed trades return None",
"permission for {endpoint}') if response.status_code != 200: raise RemoteError( f'Coinbase",
"this should be a \"send\" which is the way Coinbase",
"should be a \"send\" which is the way Coinbase uses",
"f'Found coinbase trade with unsupported asset ' f'{e.asset_name}. Ignoring it.',",
"None, pagination_next_uri: str = None, ignore_pagination: bool = False, )",
"= deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) # Only get address/transaction",
"if raw_data['status'] != 'completed': return None payout_date = raw_data.get('payout_at', None)",
"None def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp, ) ->",
"logs for details and open a bug report.', ) log.error(",
"'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This is needed",
"possible method = f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if result",
"query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp = self._api_query('accounts')",
"trade related transaction returns None Throws: - UnknownAsset due to",
"to Asset instantiation - DeserializationError due to unexpected format of",
"e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported asset ' f'{e.asset_name}.",
"wallet:withdrawals:read, wallet:deposits:read \"\"\" result, msg = self._validate_single_api_key_action('accounts') if result is",
"and movement.timestamp >= start_ts and movement.timestamp <= end_ts: movements.append(movement) return",
"https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen would need to return",
"if raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp",
"def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]: \"\"\"Processes a single",
"Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data: # Then this should",
"'type' not in tx: continue if tx['type'] == 'send': raw_data.append(tx)",
"rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface):",
"'iso8601', 'coinbase', ) # Only get address/transaction id for \"send\"",
"self.msg_aggregator.add_error( 'Found coinbase account entry without an id key. Skipping",
"the accounts response\"\"\" account_ids = [] for account_data in accounts:",
"as e: self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown asset '",
"an expected entry \"\"\" if raw_trade['status'] != 'completed': # We",
"deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset",
"coinbase account balance. Check logs ' 'for details. Ignoring it.',",
"if amount == ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price",
"= self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in the requested time",
"reach coinbase due ' 'to {}'.format(e) ) log.error(msg) return None,",
"asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset,",
"the docs but from an experiment of sending ETH #",
"the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non \"send\" type found",
"!= 'completed': # We only want to deal with completed",
"instantiation - DeserializationError due to unexpected format of dict entries",
"all sells of an account to see if that's possible",
"asset ' f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset as e:",
"from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from",
"for raw_trade in raw_data: try: trade = trade_from_coinbase(raw_trade) except UnknownAsset",
"extra arguments to the endpoint via the options argument. If",
"asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we set ZERO fee and",
"'completed': # We only want to deal with completed trades",
"if response.status_code != 200: raise RemoteError( f'Coinbase query {full_url} responded",
"None: return False, msg return True, '' def _get_account_ids(self, accounts:",
"[] for raw_trade in raw_data: try: trade = trade_from_coinbase(raw_trade) except",
"= deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount'])",
"= TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The rate is how",
"'2019-08-25', }) full_url = self.base_uri + request_url try: response =",
"asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we set ZERO",
"'wallet:buys:read' elif 'sells' in method_str: permission = 'wallet:sells:read' elif 'deposits'",
"not have permission for {endpoint}') if response.status_code != 200: raise",
"possible method = f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if result",
"entires missing an expected entry \"\"\" if raw_trade['status'] != 'completed':",
"usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset",
"ignore empty balances. Coinbase returns zero balances for everything #",
"UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface",
"as e: self.msg_aggregator.add_error( f'Error processing coinbase balance entry due to",
"account_balance=account, error=msg, ) continue return returned_balances, '' def query_online_trade_history( self,",
"raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to get the \"sends\",",
"you get/give in quotecurrency if you buy/sell 1 unit of",
"have {permission} permission activated. ' f'Please log into your coinbase",
"Crypto out of the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx",
"tx: continue if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history",
"'data' not in json_ret: raise RemoteError(f'Coinbase json response does not",
"does not have permission for {endpoint}') if response.status_code != 200:",
"to dict entires missing an expected entry \"\"\" if raw_trade['status']",
"msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't see the fee being",
"= raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource'])",
"logging import time from json.decoder import JSONDecodeError from typing import",
"the up to the given date # API version response.",
"'Coinbase API request failed. Could not reach coinbase due '",
"Coinbase uses to send # crypto outside of the exchange",
"f'Please log into your coinbase account and set all required",
"now try to get all sells of an account to",
"processing coinbase balance entry due to inability to ' f'query",
"'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def first_connection(self) ->",
"something that Rotkehlchen would need to return in query_trade_history account_ids",
"not contain data: {response.text}') final_data = json_ret['data'] # If we",
"balance. Check logs ' 'for details. Ignoring it.', ) log.error(",
"self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported asset ' f'{e.asset_name}. Ignoring",
"if 'id' not in account_data: self.msg_aggregator.add_error( 'Found coinbase account entry",
"if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data))",
"'Provided API Key is invalid' else: # any other remote",
"asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee =",
") continue account_ids.append(account_data['id']) return account_ids def _api_query( self, endpoint: str,",
"key validation', ) msg = ( f'Provided Coinbase API key",
"-> List[Any]: \"\"\"Performs a coinbase API Query for endpoint You",
"coinbase deposit/withdrawals in the requested time range # here since",
"Any]] = {} for account in resp: try: if not",
"200: raise RemoteError( f'Coinbase query {full_url} responded with error status",
"this is the first query, gather all the subsequent queries",
"'' def query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp, ) ->",
"tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements",
"the requested time range here since there # is no",
"coinbase account entry without an id key. Skipping it. ',",
"tx_amount # The rate is how much you get/give in",
"done return final_data additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, )",
"if 'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to'",
"= self._validate_single_api_key_action(method) if result is None: return False, msg return",
"try: resp = self._api_query('accounts') except RemoteError as e: msg =",
"'for details. Ignoring it.', ) log.error( 'Error processing a coinbase",
"ids account_ids = self._get_account_ids(result) if len(account_ids) != 0: # and",
"_deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]: \"\"\"Processes a single deposit/withdrawal",
"str(e) if 'transactions' in method_str: permission = 'wallet:transactions:read' elif 'buys'",
"continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result",
"' 'for details. Ignoring it.', ) log.error( 'Error processing a",
"self._api_query('accounts') except RemoteError as e: msg = ( 'Coinbase API",
"# consitutes something that Rotkehlchen would need to return in",
"fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE,",
"try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error( f'Error",
"deposits of an account to see if that's possible method",
"native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase",
"= ( f'Provided Coinbase API key needs to have {permission}",
"Then this should be a \"send\" which is the way",
"'' def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]: \"\"\"Gets the",
"from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from",
"in json_ret['pagination']: raise RemoteError('Coinbase json response contained no \"next_uri\" key')",
"are done return final_data additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri,",
"== ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset)",
"options: Optional[Dict[str, Any]] = None, pagination_next_uri: str = None, ignore_pagination:",
"from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors",
"# way to send Crypto out of the exchange txs",
"account_data in accounts: if 'id' not in account_data: self.msg_aggregator.add_error( 'Found",
"trade', trade=raw_trade, error=msg, ) continue # limit coinbase trades in",
"date # API version response. 'CB-VERSION': '2019-08-25', }) full_url =",
"coinbase transaction with unknown asset ' f'{e.asset_name}. Ignoring it.', )",
"raise AssertionError( f'Unexpected coinbase method {method_str} at API key validation',",
"account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in account_ids:",
"fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type,",
"entry with a non string id: ' f'{account_data[\"id\"]}. Skipping it.",
"UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported asset",
"not a trade related transaction returns None Throws: - UnknownAsset",
"history result', results_num=len(raw_data)) movements = [] for raw_movement in raw_data:",
") continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account",
"query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]: account_data",
"'hash') if 'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset)",
"This is needed to guarantee the up to the given",
"into your coinbase account and set all required permissions: '",
"except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported",
"time range here since there # is no argument in",
"you buy/sell 1 unit of base currency rate = Price(native_amount",
"method_str: permission = 'wallet:deposits:read' elif 'withdrawals' in method_str: permission =",
"request_url = pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if options: request_url",
"f'Error processing coinbase balance entry due to inability to '",
"request_url try: response = self.session.get(full_url) except requests.exceptions.RequestException as e: raise",
"native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The rate",
"buying/selling tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount",
"amount == ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price =",
"in quotecurrency if you buy/sell 1 unit of base currency",
"If you want just the first results then set ignore_pagination",
"usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found",
"key' in error: return None, 'Provided API Key is invalid'",
"out of the accounts response\"\"\" account_ids = [] for account_data",
"requests from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import asset_from_coinbase from",
"remote error return None, error return result, '' def validate_api_key(self)",
"\"\"\" if raw_trade['status'] != 'completed': # We only want to",
"dont appear in the docs but from an experiment of",
"account to see if that's possible method = f'accounts/{account_ids[0]}/transactions' result,",
"* usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as e: self.msg_aggregator.add_warning(",
"result', results_num=len(raw_data)) movements = [] for raw_movement in raw_data: movement",
"'Error processing a coinbase account balance', account_balance=account, error=msg, ) continue",
"due to unexpected format of dict entries - KeyError due",
"coinbase ' f'asset_movement {raw_data}. Error was: {str(e)}', ) return None",
"= [] for raw_trade in raw_data: try: trade = trade_from_coinbase(raw_trade)",
"+ request_url signature = hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase",
"coinbase trade', trade=raw_trade, error=msg, ) continue # limit coinbase trades",
"return None payout_date = raw_data.get('payout_at', None) if payout_date: timestamp =",
"requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API request failed due to",
"Dict[str, Any]) -> Optional[AssetMovement]: \"\"\"Processes a single deposit/withdrawal from coinbase",
"in returned_balances: amount = returned_balances[asset]['amount'] + amount else: returned_balances[asset] =",
"raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals",
"raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type =",
"deposit/withdrawal with unknown asset ' f'{e.asset_name}. Ignoring it.', ) except",
"if that's possible method = f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method)",
"# ignore empty balances. Coinbase returns zero balances for everything",
"each one query buys/sells # Looking at coinbase's API no",
"False, ) -> Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination)",
"from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if",
"# and now try to get all deposits of an",
"\"\"\" request_verb = \"GET\" if pagination_next_uri: request_url = pagination_next_uri else:",
"location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class",
"and not ignore_pagination: if 'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase",
"False, msg return True, '' def _get_account_ids(self, accounts: List[Dict[str, Any]])",
"in account_data: self.msg_aggregator.add_error( 'Found coinbase account entry without an id",
"raw_data['type'] == 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't see",
"rate is how much you get/give in quotecurrency if you",
"returned_balances: amount = returned_balances[asset]['amount'] + amount else: returned_balances[asset] = {}",
"Throws: - UnknownAsset due to Asset instantiation - DeserializationError due",
"needed to guarantee the up to the given date #",
"deal with completed trades return None if raw_trade['instant']: raw_time =",
"accounts elif should be at the end since the word",
"in the requested time range here since there # is",
") log.error( 'Error processing a coinbase trade', trade=raw_trade, error=msg, )",
"not we set ZERO fee and ignore log.error( f'In a",
"msg except RemoteError as e: error = str(e) if 'invalid",
"API call if trade and trade.timestamp >= start_ts and trade.timestamp",
"how much you get/give in quotecurrency if you buy/sell 1",
"self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization of a coinbase '",
"raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades = [] for",
"as e: msg = str(e) if isinstance(e, KeyError): msg =",
"get an empty next_uri we are done return final_data additional_data",
"As per the docs: https://developers.coinbase.com/api/v2?python#pagination # once we get an",
"Timestamp, end_ts: Timestamp, ) -> List[Trade]: account_data = self._api_query('accounts') #",
"trade with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue",
"deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount =",
"rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils",
"trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: \"\"\"Turns a coinbase transaction into",
"rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category,",
"json_ret: raise RemoteError(f'Coinbase json response does not contain data: {response.text}')",
"( 'Coinbase API request failed. Could not reach coinbase due",
"found in coinbase deposit/withdrawal processing' assert raw_data['type'] == 'send', msg",
"UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown asset",
"fee and ignore log.error( f'In a coinbase withdrawal of {asset.identifier}",
"not in tx: continue if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase",
"class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey,",
"+= urlencode(options) timestamp = str(int(time.time())) message = timestamp + request_verb",
"None, 'Provided API Key is invalid' else: # any other",
"amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. Coinbase returns zero",
"if isinstance(e, KeyError): msg = f'Missing key entry for {msg}.'",
"for details and open a bug report.', ) log.error( f'Unexpected",
"https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is not a trade related",
"wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\" result, msg = self._validate_single_api_key_action('accounts') if result",
"does not contain data: {response.text}') final_data = json_ret['data'] # If",
"= self._api_query('accounts') # now get the account ids and for",
"trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts: trades.append(trade)",
"end_ts: Timestamp, ) -> List[Trade]: account_data = self._api_query('accounts') # now",
"pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception):",
"contain data: {response.text}') final_data = json_ret['data'] # If we got",
"inability to ' f'query USD price: {str(e)}. Skipping balance entry',",
"start_ts and trade.timestamp <= end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self,",
"'CB-VERSION': '2019-08-25', }) full_url = self.base_uri + request_url try: response",
"= RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: \"\"\"Turns a",
"# and now try to get all sells of an",
") from rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location,",
"zero balances for everything # a user does not own",
"deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if",
"in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to get",
"return False, msg return True, '' def _get_account_ids(self, accounts: List[Dict[str,",
"a withdrawal the fee should be the same as the",
"e: raise RemoteError(f'Coinbase API request failed due to {str(e)}') if",
"not in account_data: self.msg_aggregator.add_error( 'Found coinbase account entry without an",
"rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer",
"it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase",
"def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp =",
"account to see if that's possible method = f'accounts/{account_ids[0]}/buys' result,",
"return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset,",
"returns None Throws: - UnknownAsset due to Asset instantiation -",
"withdrawals of an account to see if that's possible method",
"Optional[Dict[str, Any]] = None, pagination_next_uri: str = None, ignore_pagination: bool",
"now try to get all buys of an account to",
") continue # limit coinbase trades in the requested time",
"# If we got pagination and this is the first",
"if raw_trade['status'] != 'completed': # We only want to deal",
"= [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells",
"Ignoring it.', ) log.error( 'Error processing a coinbase account balance',",
"as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unknown asset",
"str, ignore_pagination: bool = False, ) -> Tuple[Optional[List[Any]], str]: try:",
"payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date(",
"'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category",
"( f'Provided Coinbase API key needs to have {permission} permission",
"are given to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read,",
"account in resp: try: if not account['balance']: continue amount =",
"is a withdrawal the fee should be the same as",
"network fee in the response fee = Fee(ZERO) raw_network =",
"# The rate is how much you get/give in quotecurrency",
"key') next_uri = json_ret['pagination']['next_uri'] if not next_uri: # As per",
"def _validate_single_api_key_action( self, method_str: str, ignore_pagination: bool = False, )",
"now try to get all transactions of an account to",
"return None, error return result, '' def validate_api_key(self) -> Tuple[bool,",
"= f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if result is None:",
"import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade",
"RemoteError as e: self.msg_aggregator.add_error( f'Error processing coinbase balance entry due",
"' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as e:",
"time=timestamp) # Fees dont appear in the docs but from",
"it Can log error/warning and return None if something went",
"coinbase balance result with unsupported asset ' f'{e.asset_name}. Ignoring it.',",
") log.error( f'Unexpected data encountered during deserialization of coinbase '",
"import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from",
"have permission for {endpoint}') if response.status_code != 200: raise RemoteError(",
"' f'{response.status_code} and text: {response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text)",
"returned_balances[asset]['amount'] + amount else: returned_balances[asset] = {} returned_balances[asset]['amount'] = amount",
"msg return True, '' def _get_account_ids(self, accounts: List[Dict[str, Any]]) ->",
"True. \"\"\" request_verb = \"GET\" if pagination_next_uri: request_url = pagination_next_uri",
"else: # any other remote error return None, error return",
"to the given date # API version response. 'CB-VERSION': '2019-08-25',",
"= f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if result is None:",
"only want to deal with completed trades return None if",
"is the network fee in the response fee = Fee(ZERO)",
"entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization of",
"with completed trades return None if raw_trade['instant']: raw_time = raw_trade['created_at']",
"from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize",
"timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), )",
"for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result',",
"= self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error = str(e)",
"): super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion = 'v2' self.base_uri",
"docs: https://developers.coinbase.com/api/v2?python#pagination # once we get an empty next_uri we",
"if trade and trade.timestamp >= start_ts and trade.timestamp <= end_ts:",
"timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'],",
"super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion = 'v2' self.base_uri =",
"= deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return",
"# This is needed to guarantee the up to the",
"account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data))",
"# now get the account ids and for each one",
"(DeserializationError, KeyError) as e: msg = str(e) if isinstance(e, KeyError):",
"request failed. Could not reach coinbase due ' 'to {}'.format(e)",
"import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from urllib.parse import",
") return None, msg except RemoteError as e: error =",
"-> List[Trade]: account_data = self._api_query('accounts') # now get the account",
"key/secret' elif 'invalid api key' in error: return None, 'Provided",
"returned_balances, '' def query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp, )",
"the fee being charged from the \"send\" resource amount =",
"a coinbase trade. Check logs ' 'for details. Ignoring it.',",
"self.base_uri + request_url try: response = self.session.get(full_url) except requests.exceptions.RequestException as",
"= str(e) if 'invalid signature' in error: return None, 'Failed",
"Ignoring it.', ) except (DeserializationError, KeyError) as e: msg =",
"None, ignore_pagination: bool = False, ) -> List[Any]: \"\"\"Performs a",
"'to {}'.format(e) ) log.error(msg) return None, msg returned_balances: Dict[Asset, Dict[str,",
"API version response. 'CB-VERSION': '2019-08-25', }) full_url = self.base_uri +",
"and text: {response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError:",
"', ) continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase",
"= self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits'))",
"== 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements =",
"type of transactions address = None transaction_id = None #",
"to get all deposits of an account to see if",
"deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase',",
"ignore_pagination: if 'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase json response",
"fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found",
"= deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount",
"history result', results_num=len(raw_data)) trades = [] for raw_trade in raw_data:",
"to see if that's possible method = f'accounts/{account_ids[0]}/transactions' result, msg",
"deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import",
"urlencode import requests from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import",
"Dict[Asset, Dict[str, Any]] = {} for account in resp: try:",
"str(e) if 'invalid signature' in error: return None, 'Failed to",
") except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with",
"up to the given date # API version response. 'CB-VERSION':",
"since there # is no argument in the API call",
"Dict[str, Any]) -> Optional[Trade]: \"\"\"Turns a coinbase transaction into a",
"= json_ret['pagination']['next_uri'] if not next_uri: # As per the docs:",
"other remote error return None, error return result, '' def",
"None if raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time = raw_trade['payout_at']",
"Optional, Tuple from urllib.parse import urlencode import requests from rotkehlchen.assets.asset",
"= Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'],",
"something went wrong at deserialization \"\"\" try: if raw_data['status'] !=",
"log into your coinbase account and set all required permissions:",
"would need to return in query_trade_history account_ids = self._get_account_ids(account_data) raw_data",
"and set all required permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read,",
"any other remote error return None, error return result, ''",
"fee in the response fee = Fee(ZERO) raw_network = raw_data.get('network',",
"an experiment of sending ETH # to an address from",
"single deposit/withdrawal from coinbase and deserializes it Can log error/warning",
"log.error( f'Unexpected data encountered during deserialization of coinbase ' f'asset_movement",
"balance', account_balance=account, error=msg, ) continue return returned_balances, '' def query_online_trade_history(",
"deserialization \"\"\" try: if raw_data['status'] != 'completed': return None payout_date",
"exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if 'type'",
"query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[Trade]: account_data",
"a coinbase withdrawal of {asset.identifier} the fee' f'is denoted in",
"fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__(",
"account and set all required permissions: ' f'wallet:accounts:read, wallet:transactions:read, '",
"return final_data additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data)",
"all the subsequent queries if 'pagination' in json_ret and not",
"API key/secret' elif 'invalid api key' in error: return None,",
"Can't see the fee being charged from the \"send\" resource",
"does not own if amount == ZERO: continue asset =",
"'pagination' in json_ret and not pagination_next_uri and not ignore_pagination: if",
"' f'query USD price: {str(e)}. Skipping balance entry', ) continue",
"json_ret['data'] # If we got pagination and this is the",
"get the account ids account_ids = self._get_account_ids(result) if len(account_ids) !=",
"= timestamp + request_verb + request_url signature = hmac.new( self.secret,",
"rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import",
"-> List[str]: \"\"\"Gets the account ids out of the accounts",
"resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees",
"no argument in the API call if trade and trade.timestamp",
"import urlencode import requests from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters",
"out of the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx in",
"of base currency rate = Price(native_amount / tx_amount) fee_amount =",
"permission = 'wallet:sells:read' elif 'deposits' in method_str: permission = 'wallet:deposits:read'",
"'Error processing a coinbase trade', trade=raw_trade, error=msg, ) continue #",
"= False, ) -> List[Any]: \"\"\"Performs a coinbase API Query",
"+ request_verb + request_url signature = hmac.new( self.secret, message.encode(), hashlib.sha256,",
"see if that's possible method = f'accounts/{account_ids[0]}/deposits' result, msg =",
"coinbase transaction is not a trade related transaction returns None",
"raw_data: try: trade = trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning(",
"to unexpected format of dict entries - KeyError due to",
"wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\" result, msg = self._validate_single_api_key_action('accounts')",
"msg = ( 'Coinbase API request failed. Could not reach",
"Dict[str, Any]]], str]: try: resp = self._api_query('accounts') except RemoteError as",
"not in json_ret['pagination']: raise RemoteError('Coinbase json response contained no \"next_uri\"",
"from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive,",
"Dict[str, Any]] = {} for account in resp: try: if",
"None, error return result, '' def validate_api_key(self) -> Tuple[bool, str]:",
"time from json.decoder import JSONDecodeError from typing import TYPE_CHECKING, Any,",
"from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from",
"log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY':",
"def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]:",
"key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization",
"return None if raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time =",
"# crypto outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg =",
"def __init__( self, api_key: ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator:",
"rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import",
"json_ret['pagination']: raise RemoteError('Coinbase json response contained no \"next_uri\" key') next_uri",
"pagination_next_uri. If you want just the first results then set",
"import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import",
"transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction",
"if movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts:",
"in method_str: permission = 'wallet:trades:read' # the accounts elif should",
"f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' )",
"= f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if result is None:",
"secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key,",
"of an account to see if that's possible method =",
") except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with",
"permission = 'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase method {method_str}",
"raw_data: Dict[str, Any]) -> Optional[AssetMovement]: \"\"\"Processes a single deposit/withdrawal from",
"coinbase ' 'asset movement. Check logs for details and open",
"{}'.format(e) ) log.error(msg) return None, msg returned_balances: Dict[Asset, Dict[str, Any]]",
"return None if something went wrong at deserialization \"\"\" try:",
"in {raw_fee[\"currency\"]}', ) else: fee = deserialize_fee(raw_fee['amount']) if 'network' in",
"except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown",
"buys of an account to see if that's possible method",
"except RemoteError as e: self.msg_aggregator.add_error( f'Error processing coinbase balance entry",
"unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError,",
"signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This is needed to",
"raise RemoteError( f'Coinbase query {full_url} responded with error status code:",
"entry due to inability to ' f'query USD price: {str(e)}.",
"coinbase withdrawal of {asset.identifier} the fee' f'is denoted in {raw_fee[\"currency\"]}',",
"deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'],",
"to see if that's possible method = f'accounts/{account_ids[0]}/deposits' result, msg",
"Makes sure that the following permissions are given to the",
"KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected",
"is invalid' else: # any other remote error return None,",
"in Rotki Makes sure that the following permissions are given",
"method = f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if result is",
"movements = [] for raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement)",
"# If not we set ZERO fee and ignore log.error(",
"f'Found coinbase balance result with unsupported asset ' f'{e.asset_name}. Ignoring",
"as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unsupported asset",
"provide pagination_next_uri. If you want just the first results then",
"fee=fee, link=str(raw_data['id']), ) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase",
"pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options)",
"deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) # Only get address/transaction id",
") -> Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except",
"log.error( f'In a coinbase withdrawal of {asset.identifier} the fee' f'is",
"and now try to get all sells of an account",
"rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset",
"type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen",
"method_str: str, ignore_pagination: bool = False, ) -> Tuple[Optional[List[Any]], str]:",
"f'Found coinbase deposit/withdrawal with unsupported asset ' f'{e.asset_name}. Ignoring it.',",
"import hashlib import hmac import logging import time from json.decoder",
"returned_balances[asset] = {} returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount'] *",
"0: # and now try to get all transactions of",
"transaction with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue",
"possible method = f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if result",
"unexpected format of dict entries - KeyError due to dict",
"= raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601',",
"# Then this should be a \"send\" which is the",
"the account ids out of the accounts response\"\"\" account_ids =",
"Rotkehlchen would need to return in query_trade_history account_ids = self._get_account_ids(account_data)",
"-> Optional[AssetMovement]: \"\"\"Processes a single deposit/withdrawal from coinbase and deserializes",
"to an address from coinbase there is the network fee",
"( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair, )",
"' f'wallet:deposits:read, wallet:trades:read' ) return None, msg except RemoteError as",
"= f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if result is None:",
"native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you are buying/selling",
"raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data: address",
"str(int(time.time())) message = timestamp + request_verb + request_url signature =",
"True, '' def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]: \"\"\"Gets",
"= get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'],",
"at the end since the word appears # in other",
"tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade(",
"# Only get address/transaction id for \"send\" type of transactions",
"not next_uri: # As per the docs: https://developers.coinbase.com/api/v2?python#pagination # once",
"Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp = self._api_query('accounts') except RemoteError",
"processing a coinbase account balance. Check logs ' 'for details.",
"details and open a bug report.', ) log.error( f'Unexpected data",
"withdrawal the fee should be the same as the moved",
"see if that's possible method = f'accounts/{account_ids[0]}/buys' result, msg =",
"all deposits of an account to see if that's possible",
"the docs: https://developers.coinbase.com/api/v2?python#pagination # once we get an empty next_uri",
"in json_ret and not pagination_next_uri and not ignore_pagination: if 'next_uri'",
"f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during",
"dict entires missing an expected entry \"\"\" if raw_trade['status'] !=",
"now try to get all deposits of an account to",
") continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase trade",
"a single deposit/withdrawal from coinbase and deserializes it Can log",
"amount = tx_amount # The rate is how much you",
"method = f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if result is",
"self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN':",
"= self._validate_single_api_key_action('accounts') if result is None: return False, msg #",
"way to send Crypto out of the exchange txs =",
"except CoinbasePermissionError as e: error = str(e) if 'transactions' in",
"self._validate_single_api_key_action(method) if result is None: return False, msg return True,",
"address = None transaction_id = None # movement_category: Union[Literal['deposit'], Literal['withdrawal']]",
"just the first results then set ignore_pagination to True. \"\"\"",
"in resp: try: if not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount'])",
"error = str(e) if 'invalid signature' in error: return None,",
"coinbase transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase",
"continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase trade with",
"TYPE_CHECKING, Any, Dict, List, Optional, Tuple from urllib.parse import urlencode",
"id: ' f'{account_data[\"id\"]}. Skipping it. ', ) continue account_ids.append(account_data['id']) return",
"= asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp)",
"experiment of sending ETH # to an address from coinbase",
"Tuple[bool, str]: \"\"\"Validates that the Coinbase API key is good",
"a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is not",
"= asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount,",
"= deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category,",
"are buying/selling tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount =",
"self.first_connection_made = True def _validate_single_api_key_action( self, method_str: str, ignore_pagination: bool",
"Provided API key/secret' elif 'invalid api key' in error: return",
"List, Optional, Tuple from urllib.parse import urlencode import requests from",
"Any]) -> Optional[Trade]: \"\"\"Turns a coinbase transaction into a rotkehlchen",
"string id: ' f'{account_data[\"id\"]}. Skipping it. ', ) continue account_ids.append(account_data['id'])",
"str]: \"\"\"Validates that the Coinbase API key is good for",
"result', results_num=len(raw_data)) trades = [] for raw_trade in raw_data: try:",
"is None: return False, msg # now get the account",
"}) full_url = self.base_uri + request_url try: response = self.session.get(full_url)",
"JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}') if 'data'",
"= 'wallet:sells:read' elif 'deposits' in method_str: permission = 'wallet:deposits:read' elif",
"method_str: permission = 'wallet:buys:read' elif 'sells' in method_str: permission =",
"import requests from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import asset_from_coinbase",
"Optional[AssetMovement]: \"\"\"Processes a single deposit/withdrawal from coinbase and deserializes it",
"error/warning and return None if something went wrong at deserialization",
"rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import",
"result with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) continue",
"You can optionally provide extra arguments to the endpoint via",
"f'Unexpected coinbase method {method_str} at API key validation', ) msg",
"= self.session.get(full_url) except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API request",
"None) if raw_fee: # Since this is a withdrawal the",
"id for \"send\" type of transactions address = None transaction_id",
"final_data additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return",
"asset_from_coinbase from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import DeserializationError, RemoteError,",
"import asset_from_coinbase from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import DeserializationError,",
"request_url += urlencode(options) timestamp = str(int(time.time())) message = timestamp +",
"deserialize_trade_type, ) from rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory, Fee,",
"return False, msg # and now try to get all",
"if response.status_code == 403: raise CoinbasePermissionError(f'API key does not have",
"except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}') if",
"be the same as the moved asset if asset !=",
"Query for endpoint You can optionally provide extra arguments to",
"unknown asset ' f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset as",
"{msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase trade. Check logs '",
"-> Tuple[bool, str]: \"\"\"Validates that the Coinbase API key is",
"the account ids and for each one query buys/sells #",
"due to inability to ' f'query USD price: {str(e)}. Skipping",
"ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer",
"= None, pagination_next_uri: str = None, ignore_pagination: bool = False,",
"List[Dict[str, Any]]) -> List[str]: \"\"\"Gets the account ids out of",
"Only get address/transaction id for \"send\" type of transactions address",
"Coinbase API key needs to have {permission} permission activated. '",
"# here since there is no argument in the API",
"final_data = json_ret['data'] # If we got pagination and this",
"# also get transactions to get the \"sends\", which in",
"argument. If this is an ongoing paginating call then provide",
"transactions to get the \"sends\", which in Coinbase is the",
"key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase account",
"charged from the \"send\" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset =",
"permission = 'wallet:withdrawals:read' elif 'trades' in method_str: permission = 'wallet:trades:read'",
"transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen would need",
"msg # and now try to get all deposits of",
"and now try to get all withdrawals of an account",
"got pagination and this is the first query, gather all",
"time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in",
"request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This",
"final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]:",
"query buys/sells # Looking at coinbase's API no other type",
"here since there # is no argument in the API",
"str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e:",
"TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The rate is how much",
"consitutes something that Rotkehlchen would need to return in query_trade_history",
"validate_api_key(self) -> Tuple[bool, str]: \"\"\"Validates that the Coinbase API key",
"amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont",
"= AssetMovementCategory.WITHDRAWAL # Can't see the fee being charged from",
"# We only want to deal with completed trades return",
"error=msg, ) continue return returned_balances, '' def query_online_trade_history( self, start_ts:",
"= json_ret['data'] # If we got pagination and this is",
"f'asset_movement {raw_data}. Error was: {str(e)}', ) return None def query_online_deposits_withdrawals(",
"subsequent queries if 'pagination' in json_ret and not pagination_next_uri and",
"ids out of the accounts response\"\"\" account_ids = [] for",
"during deserialization of a coinbase ' 'asset movement. Check logs",
"= 'wallet:deposits:read' elif 'withdrawals' in method_str: permission = 'wallet:withdrawals:read' elif",
"= self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock()",
"# As per the docs: https://developers.coinbase.com/api/v2?python#pagination # once we get",
"tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount #",
"= f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a",
"hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({",
"self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise()",
"account_ids def _api_query( self, endpoint: str, options: Optional[Dict[str, Any]] =",
"bool = False, ) -> Tuple[Optional[List[Any]], str]: try: result =",
"a coinbase transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the",
"typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from urllib.parse",
"UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange import",
"continue # limit coinbase trades in the requested time range",
"'accounts' in method_str: permission = 'wallet:accounts:read' else: raise AssertionError( f'Unexpected",
"# and now try to get all transactions of an",
"processing' assert raw_data['type'] == 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL #",
"RemoteError(f'Coinbase json response does not contain data: {response.text}') final_data =",
"API request failed. Could not reach coinbase due ' 'to",
"f'Found coinbase balance result with unknown asset ' f'{e.asset_name}. Ignoring",
"if result is None: return False, msg return True, ''",
"tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'],",
"pagination_next_uri: str = None, ignore_pagination: bool = False, ) ->",
"deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])",
"AssetMovementCategory.WITHDRAWAL # Can't see the fee being charged from the",
"not pagination_next_uri and not ignore_pagination: if 'next_uri' not in json_ret['pagination']:",
"results_num=len(raw_data)) movements = [] for raw_movement in raw_data: movement =",
"= deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601',",
"Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is not a trade",
"at API key validation', ) msg = ( f'Provided Coinbase",
"as e: self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported asset '",
"everything # a user does not own if amount ==",
"from rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger)",
"https://developers.coinbase.com/api/v2?python#pagination # once we get an empty next_uri we are",
"method = f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if result is",
"if not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty",
"= [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also",
"it.', ) log.error( 'Error processing a coinbase account balance', account_balance=account,",
"given to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read",
"continue if asset in returned_balances: amount = returned_balances[asset]['amount'] + amount",
"error=msg, ) continue # limit coinbase trades in the requested",
"!= 200: raise RemoteError( f'Coinbase query {full_url} responded with error",
"Asset instantiation - DeserializationError due to unexpected format of dict",
"f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if result is None: return",
"an account to see if that's possible method = f'accounts/{account_ids[0]}/deposits'",
"Can log error/warning and return None if something went wrong",
"= deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you",
"ZERO from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures",
"self.msg_aggregator.add_warning( f'Found coinbase balance result with unknown asset ' f'{e.asset_name}.",
"elif should be at the end since the word appears",
"self.api_key, # This is needed to guarantee the up to",
"'wallet:withdrawals:read' elif 'trades' in method_str: permission = 'wallet:trades:read' # the",
"= trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase transaction",
"self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This is",
"it. ', ) continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found",
"RemoteError as e: error = str(e) if 'invalid signature' in",
"if raw_network: raw_fee = raw_network.get('transaction_fee', None) if raw_fee: # Since",
"'wallet:sells:read' elif 'deposits' in method_str: permission = 'wallet:deposits:read' elif 'withdrawals'",
"= deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear",
"Optional[Trade]: \"\"\"Turns a coinbase transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys",
"deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import ( ApiKey, ApiSecret,",
"'transactions' in method_str: permission = 'wallet:transactions:read' elif 'buys' in method_str:",
"set ignore_pagination to True. \"\"\" request_verb = \"GET\" if pagination_next_uri:",
"buy/sell 1 unit of base currency rate = Price(native_amount /",
"RemoteError( f'Coinbase query {full_url} responded with error status code: '",
"permission activated. ' f'Please log into your coinbase account and",
"ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except",
"response fee = Fee(ZERO) raw_network = raw_data.get('network', None) if raw_network:",
"\"send\" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) #",
"= raw_data.get('network', None) if raw_network: raw_fee = raw_network.get('transaction_fee', None) if",
"in coinbase you are buying/selling tx_asset for native_asset pair =",
"response\"\"\" account_ids = [] for account_data in accounts: if 'id'",
"see if that's possible method = f'accounts/{account_ids[0]}/transactions' result, msg =",
"to guarantee the up to the given date # API",
"f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase",
"Literal['withdrawal']] if 'type' in raw_data: # Then this should be",
"raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to get the \"sends\", which",
"return None, 'Provided API Key is invalid' else: # any",
"invalid JSON response: {response.text}') if 'data' not in json_ret: raise",
"from coinbase there is the network fee in the response",
"database) self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator",
"rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING:",
"your coinbase account and set all required permissions: ' f'wallet:accounts:read,",
"API request failed due to {str(e)}') if response.status_code == 403:",
"MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion = 'v2'",
"f'In a coinbase withdrawal of {asset.identifier} the fee' f'is denoted",
"account_ids = self._get_account_ids(result) if len(account_ids) != 0: # and now",
"should be the same as the moved asset if asset",
"account_data = self._api_query('accounts') # now get the account ids and",
"'buys' in method_str: permission = 'wallet:buys:read' elif 'sells' in method_str:",
"JSON response: {response.text}') if 'data' not in json_ret: raise RemoteError(f'Coinbase",
"asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) #",
"processing a coinbase account balance', account_balance=account, error=msg, ) continue return",
"trade. Check logs ' 'for details. Ignoring it.', ) log.error(",
"f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if result is None: return",
"the end since the word appears # in other endpoints",
"to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\"",
"that the following permissions are given to the key: wallet:accounts:read,",
"If we got pagination and this is the first query,",
"requested time range # here since there is no argument",
"{raw_fee[\"currency\"]}', ) else: fee = deserialize_fee(raw_fee['amount']) if 'network' in raw_data:",
"'invalid api key' in error: return None, 'Provided API Key",
"range # here since there is no argument in the",
"status code: ' f'{response.status_code} and text: {response.text}', ) try: json_ret",
"# any other remote error return None, error return result,",
"is None: return False, msg # and now try to",
"return None, msg returned_balances: Dict[Asset, Dict[str, Any]] = {} for",
"api_key, secret, database) self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator",
"KeyError due to dict entires missing an expected entry \"\"\"",
"entry without an id key. Skipping it. ', ) continue",
"json response does not contain data: {response.text}') final_data = json_ret['data']",
"of coinbase ' f'asset_movement {raw_data}. Error was: {str(e)}', ) return",
"amount = returned_balances[asset]['amount'] + amount else: returned_balances[asset] = {} returned_balances[asset]['amount']",
"and now try to get all buys of an account",
"'coinbase', ) # Only get address/transaction id for \"send\" type",
"except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API request failed due",
"msg = 'Non \"send\" type found in coinbase deposit/withdrawal processing'",
"buys/sells # Looking at coinbase's API no other type of",
"urlencode(options) timestamp = str(int(time.time())) message = timestamp + request_verb +",
"since there is no argument in the API call if",
"cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler",
"secret, database) self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator =",
"if 'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase json response contained",
"Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']),",
"Any, Dict, List, Optional, Tuple from urllib.parse import urlencode import",
"need to return in query_trade_history account_ids = self._get_account_ids(account_data) raw_data =",
"data encountered during deserialization of a coinbase ' 'asset movement.",
"options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self)",
"= ( 'Coinbase API request failed. Could not reach coinbase",
"the way Coinbase uses to send # crypto outside of",
"protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import",
"None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp",
"set all required permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read,",
"related transaction returns None Throws: - UnknownAsset due to Asset",
"key. Skipping it. ', ) continue if not isinstance(account_data['id'], str):",
"all transactions of an account to see if that's possible",
"end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) ->",
"f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return None, msg",
"a trade related transaction returns None Throws: - UnknownAsset due",
"= tx_amount # The rate is how much you get/give",
"coinbase due ' 'to {}'.format(e) ) log.error(msg) return None, msg",
"hmac import logging import time from json.decoder import JSONDecodeError from",
"return in query_trade_history account_ids = self._get_account_ids(account_data) raw_data = [] for",
"fee' f'is denoted in {raw_fee[\"currency\"]}', ) else: fee = deserialize_fee(raw_fee['amount'])",
"bug report.', ) log.error( f'Unexpected data encountered during deserialization of",
"trade.timestamp <= end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data: Dict[str,",
"Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val",
"= returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as",
"options argument. If this is an ongoing paginating call then",
"a non string id: ' f'{account_data[\"id\"]}. Skipping it. ', )",
"endpoint via the options argument. If this is an ongoing",
"try to get all deposits of an account to see",
"resp: try: if not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) #",
"result is None: return False, msg # now get the",
"into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is",
"self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]: account_data =",
"result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error =",
"# movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data: # Then",
"str): self.msg_aggregator.add_error( f'Found coinbase account entry with a non string",
"no \"next_uri\" key') next_uri = json_ret['pagination']['next_uri'] if not next_uri: #",
"\"\"\" try: if raw_data['status'] != 'completed': return None payout_date =",
"and now try to get all deposits of an account",
"tx in txs: if 'type' not in tx: continue if",
"str, options: Optional[Dict[str, Any]] = None, pagination_next_uri: str = None,",
"{raw_data}. Error was: {str(e)}', ) return None def query_online_deposits_withdrawals( self,",
"{str(e)}', ) return None def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts:",
"'send', msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't see the fee",
"failed due to {str(e)}') if response.status_code == 403: raise CoinbasePermissionError(f'API",
"ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase',",
"to {str(e)}') if response.status_code == 403: raise CoinbasePermissionError(f'API key does",
"raise CoinbasePermissionError(f'API key does not have permission for {endpoint}') if",
"and trade.timestamp <= end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data:",
"' 'asset movement. Check logs for details and open a",
"and for each one query buys/sells # Looking at coinbase's",
"raw_trade in raw_data: try: trade = trade_from_coinbase(raw_trade) except UnknownAsset as",
"deposit/withdrawal from coinbase and deserializes it Can log error/warning and",
"amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class",
"None transaction_id = None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type'",
"should be at the end since the word appears #",
"None, msg returned_balances: Dict[Asset, Dict[str, Any]] = {} for account",
"usd_value except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result",
"class Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey, secret: ApiSecret, database:",
"final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str,",
"if that's possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method)",
"from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer from",
"False, msg # and now try to get all buys",
"in the requested time range # here since there is",
"in coinbase deposit/withdrawal processing' assert raw_data['type'] == 'send', msg movement_category",
"f'Found coinbase account entry with a non string id: '",
"in the API call if movement and movement.timestamp >= start_ts",
"want just the first results then set ignore_pagination to True.",
"response = self.session.get(full_url) except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API",
"Rotki Makes sure that the following permissions are given to",
"# Fees dont appear in the docs but from an",
"this is a withdrawal the fee should be the same",
"fee being charged from the \"send\" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount'])",
"if 'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else:",
"the fee should be the same as the moved asset",
"return account_ids def _api_query( self, endpoint: str, options: Optional[Dict[str, Any]]",
"other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that",
"elif 'withdrawals' in method_str: permission = 'wallet:withdrawals:read' elif 'trades' in",
"= None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data:",
"'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase method {method_str} at API",
"raw_data.get('network', None) if raw_network: raw_fee = raw_network.get('transaction_fee', None) if raw_fee:",
"for \"send\" type of transactions address = None transaction_id =",
"deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory,",
"Check logs for details and open a bug report.', )",
"raw_network: raw_fee = raw_network.get('transaction_fee', None) if raw_fee: # Since this",
"accounts response\"\"\" account_ids = [] for account_data in accounts: if",
"is good for usage in Rotki Makes sure that the",
"json response contained no \"next_uri\" key') next_uri = json_ret['pagination']['next_uri'] if",
"method = f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if result is",
"log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: \"\"\"Turns",
"all required permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read,",
"the Provided API key/secret' elif 'invalid api key' in error:",
"Ignoring it.', ) except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase",
"asset ' f'{e.asset_name}. Ignoring it.', ) except (DeserializationError, KeyError) as",
"continue return returned_balances, '' def query_online_trade_history( self, start_ts: Timestamp, end_ts:",
"API Query for endpoint You can optionally provide extra arguments",
"' 'to {}'.format(e) ) log.error(msg) return None, msg returned_balances: Dict[Asset,",
"UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported asset",
"ETH # to an address from coinbase there is the",
"quotecurrency if you buy/sell 1 unit of base currency rate",
"withdrawal of {asset.identifier} the fee' f'is denoted in {raw_fee[\"currency\"]}', )",
"coinbase balance result with unknown asset ' f'{e.asset_name}. Ignoring it.',",
"accounts: if 'id' not in account_data: self.msg_aggregator.add_error( 'Found coinbase account",
"f'is denoted in {raw_fee[\"currency\"]}', ) else: fee = deserialize_fee(raw_fee['amount']) if",
"from the \"send\" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'],",
"asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id,",
"# Since this is a withdrawal the fee should be",
"due to {str(e)}') if response.status_code == 403: raise CoinbasePermissionError(f'API key",
"to see if that's possible method = f'accounts/{account_ids[0]}/buys' result, msg",
"permissions are given to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read,",
"ignore_pagination: bool = False, ) -> List[Any]: \"\"\"Performs a coinbase",
"if raw_fee: # Since this is a withdrawal the fee",
"'iso8601', 'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', )",
"self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown asset ' f'{e.asset_name}. Ignoring",
"None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data: #",
"to return in query_trade_history account_ids = self._get_account_ids(account_data) raw_data = []",
"movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in raw_data: # Then this",
"'Found coinbase account entry without an id key. Skipping it.",
"as e: error = str(e) if 'invalid signature' in error:",
"API Key is invalid' else: # any other remote error",
"account ids out of the accounts response\"\"\" account_ids = []",
"API call if movement and movement.timestamp >= start_ts and movement.timestamp",
"appears # in other endpoints elif 'accounts' in method_str: permission",
"{str(e)}. Skipping balance entry', ) continue if asset in returned_balances:",
"msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion =",
"= str(e) if 'transactions' in method_str: permission = 'wallet:transactions:read' elif",
"'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'],",
"Could not reach coinbase due ' 'to {}'.format(e) ) log.error(msg)",
"in txs: if 'type' not in tx: continue if tx['type']",
"ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret,",
"of the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx in txs:",
"trades in the requested time range here since there #",
"Asset from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc import ZERO from",
"= 'Non \"send\" type found in coinbase deposit/withdrawal processing' assert",
"'trades' in method_str: permission = 'wallet:trades:read' # the accounts elif",
"msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data",
"return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]],",
"for raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit coinbase",
"<= end_ts: trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any])",
"# and now try to get all buys of an",
") continue if asset in returned_balances: amount = returned_balances[asset]['amount'] +",
"Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error( f'Error processing coinbase balance",
"= deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE,",
"import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import (",
"AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair, ) from rotkehlchen.user_messages import",
"def _get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]: \"\"\"Gets the account",
"self.msg_aggregator.add_error( f'Error processing coinbase balance entry due to inability to",
"via the options argument. If this is an ongoing paginating",
"'Non \"send\" type found in coinbase deposit/withdrawal processing' assert raw_data['type']",
"Any]] = None, pagination_next_uri: str = None, ignore_pagination: bool =",
"self.msg_aggregator.add_error( f'Found coinbase account entry with a non string id:",
"try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error",
"from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler",
"limit coinbase trades in the requested time range here since",
"in accounts: if 'id' not in account_data: self.msg_aggregator.add_error( 'Found coinbase",
"transaction_id = None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if 'type' in",
"the # way to send Crypto out of the exchange",
"it. ', ) continue account_ids.append(account_data['id']) return account_ids def _api_query( self,",
"entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase trade. Check",
"\"\"\"Gets the account ids out of the accounts response\"\"\" account_ids",
"account to see if that's possible method = f'accounts/{account_ids[0]}/withdrawals' result,",
"try to get all buys of an account to see",
"returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] =",
"argument in the API call if trade and trade.timestamp >=",
"Price, Timestamp, TradePair, ) from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces",
"error return None, error return result, '' def validate_api_key(self) ->",
"in method_str: permission = 'wallet:deposits:read' elif 'withdrawals' in method_str: permission",
"= logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) ->",
"raise RemoteError(f'Coinbase API request failed due to {str(e)}') if response.status_code",
"# https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen would need to",
"entry', ) continue if asset in returned_balances: amount = returned_balances[asset]['amount']",
"the accounts elif should be at the end since the",
"[] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get",
"account balance. Check logs ' 'for details. Ignoring it.', )",
"\"sends\", which in Coinbase is the # way to send",
"f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if result is None: return",
"not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty balances.",
"import hmac import logging import time from json.decoder import JSONDecodeError",
"= None transaction_id = None # movement_category: Union[Literal['deposit'], Literal['withdrawal']] if",
"currency rate = Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset",
") -> List[Any]: \"\"\"Performs a coinbase API Query for endpoint",
"'wallet:deposits:read' elif 'withdrawals' in method_str: permission = 'wallet:withdrawals:read' elif 'trades'",
"message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature,",
"False, msg # now get the account ids account_ids =",
"-> Optional[Trade]: \"\"\"Turns a coinbase transaction into a rotkehlchen Trade.",
"account entry without an id key. Skipping it. ', )",
"'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret, database) self.apiversion",
"get all sells of an account to see if that's",
"hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP':",
"# Can't see the fee being charged from the \"send\"",
"rate = Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset =",
"List[AssetMovement]: account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data = []",
"that the Coinbase API key is good for usage in",
"this is an ongoing paginating call then provide pagination_next_uri. If",
"Timestamp, ) -> List[Trade]: account_data = self._api_query('accounts') # now get",
"coinbase account balance', account_balance=account, error=msg, ) continue return returned_balances, ''",
"raw_fee: # Since this is a withdrawal the fee should",
"for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount = tx_amount # The",
"tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount'])",
"of the accounts response\"\"\" account_ids = [] for account_data in",
"word appears # in other endpoints elif 'accounts' in method_str:",
"method_str: permission = 'wallet:sells:read' elif 'deposits' in method_str: permission =",
"\"\"\" result, msg = self._validate_single_api_key_action('accounts') if result is None: return",
"response.status_code != 200: raise RemoteError( f'Coinbase query {full_url} responded with",
"f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if result is None: return",
"self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error = str(e) if",
"timestamp, 'CB-ACCESS-KEY': self.api_key, # This is needed to guarantee the",
"and ignore log.error( f'In a coinbase withdrawal of {asset.identifier} the",
"# the accounts elif should be at the end since",
"Skipping it. ', ) continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error(",
"msg # and now try to get all withdrawals of",
"raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements = [] for",
"'type' in raw_data: # Then this should be a \"send\"",
"if that's possible method = f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method)",
"# API version response. 'CB-VERSION': '2019-08-25', }) full_url = self.base_uri",
"None, 'Failed to authenticate with the Provided API key/secret' elif",
"that's possible method = f'accounts/{account_ids[0]}/deposits' result, msg = self._validate_single_api_key_action(method) if",
"Coinbase is the # way to send Crypto out of",
"= 'wallet:trades:read' # the accounts elif should be at the",
"txs = self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if 'type' not",
"e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unsupported asset '",
"required permissions: ' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, '",
"import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger =",
"Coinbase returns zero balances for everything # a user does",
"timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount =",
"continue except (DeserializationError, KeyError) as e: msg = str(e) if",
"msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing",
"Ignoring it.', ) continue except (DeserializationError, KeyError) as e: msg",
"rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization",
"get all deposits of an account to see if that's",
"query_trade_history account_ids = self._get_account_ids(account_data) raw_data = [] for account_id in",
"key needs to have {permission} permission activated. ' f'Please log",
"Fees dont appear in the docs but from an experiment",
"the requested time range # here since there is no",
"trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp)",
"account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. Coinbase",
"if options: request_url += urlencode(options) timestamp = str(int(time.time())) message =",
"database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase, self).__init__('coinbase', api_key, secret, database)",
"'coinbase') else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) #",
"= 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def first_connection(self)",
"it.', ) except (DeserializationError, KeyError) as e: msg = str(e)",
"result, msg = self._validate_single_api_key_action('accounts') if result is None: return False,",
"try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid",
"report.', ) log.error( f'Unexpected data encountered during deserialization of coinbase",
"from urllib.parse import urlencode import requests from rotkehlchen.assets.asset import Asset",
"def _api_query( self, endpoint: str, options: Optional[Dict[str, Any]] = None,",
"if 'type' not in tx: continue if tx['type'] == 'send':",
"self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if 'type' not in tx:",
"deposit/withdrawal with unsupported asset ' f'{e.asset_name}. Ignoring it.', ) except",
"at coinbase's API no other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions",
"else: returned_balances[asset] = {} returned_balances[asset]['amount'] = amount usd_value = returned_balances[asset]['amount']",
"f'{e.asset_name}. Ignoring it.', ) except (DeserializationError, KeyError) as e: msg",
">= start_ts and trade.timestamp <= end_ts: trades.append(trade) return trades def",
"is not a trade related transaction returns None Throws: -",
"raise RemoteError(f'Coinbase json response does not contain data: {response.text}') final_data",
"log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades = [] for raw_trade",
"account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions to",
"we got pagination and this is the first query, gather",
"authenticate with the Provided API key/secret' elif 'invalid api key'",
"request_url = f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options) timestamp =",
"deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address,",
"json_ret['pagination']['next_uri'] if not next_uri: # As per the docs: https://developers.coinbase.com/api/v2?python#pagination",
"in raw_data: try: trade = trade_from_coinbase(raw_trade) except UnknownAsset as e:",
"self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def",
"raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp =",
"trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass",
"trade=raw_trade, error=msg, ) continue # limit coinbase trades in the",
"f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError) as e:",
"in the docs but from an experiment of sending ETH",
"self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in the requested time range",
"return result, '' def validate_api_key(self) -> Tuple[bool, str]: \"\"\"Validates that",
"per the docs: https://developers.coinbase.com/api/v2?python#pagination # once we get an empty",
"method_str: permission = 'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase method",
"List[str]: \"\"\"Gets the account ids out of the accounts response\"\"\"",
"in method_str: permission = 'wallet:buys:read' elif 'sells' in method_str: permission",
"result is None: return False, msg # and now try",
"if len(account_ids) != 0: # and now try to get",
"with unknown asset ' f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset",
"the first results then set ignore_pagination to True. \"\"\" request_verb",
"except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported",
"log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements = [] for raw_movement",
"ignore_pagination to True. \"\"\" request_verb = \"GET\" if pagination_next_uri: request_url",
"deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset = asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you are",
"as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown asset '",
"Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError as",
"'CB-ACCESS-KEY': self.api_key, # This is needed to guarantee the up",
"accounts: List[Dict[str, Any]]) -> List[str]: \"\"\"Gets the account ids out",
"e: self.msg_aggregator.add_warning( f'Found coinbase transaction with unknown asset ' f'{e.asset_name}.",
"JSONDecodeError from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple",
"API key is good for usage in Rotki Makes sure",
"try to get all sells of an account to see",
"see if that's possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg =",
"buys/sells history result', results_num=len(raw_data)) trades = [] for raw_trade in",
"response contained no \"next_uri\" key') next_uri = json_ret['pagination']['next_uri'] if not",
"moved asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not",
"logs ' 'for details. Ignoring it.', ) log.error( 'Error processing",
"if result is None: return False, msg # and now",
"dict entries - KeyError due to dict entires missing an",
"List[Any]: \"\"\"Performs a coinbase API Query for endpoint You can",
"expected entry \"\"\" if raw_trade['status'] != 'completed': # We only",
"the same as the moved asset if asset != asset_from_coinbase(raw_fee['currency'],",
"msg = str(e) if isinstance(e, KeyError): msg = f'Missing key",
"is no argument in the API call if movement and",
"'wallet:trades:read' # the accounts elif should be at the end",
"response: {response.text}') if 'data' not in json_ret: raise RemoteError(f'Coinbase json",
"that's possible method = f'accounts/{account_ids[0]}/buys' result, msg = self._validate_single_api_key_action(method) if",
"else: raise AssertionError( f'Unexpected coinbase method {method_str} at API key",
"is needed to guarantee the up to the given date",
"same as the moved asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp):",
"an ongoing paginating call then provide pagination_next_uri. If you want",
"'deposits' in method_str: permission = 'wallet:deposits:read' elif 'withdrawals' in method_str:",
"transactions of an account to see if that's possible method",
"_validate_single_api_key_action( self, method_str: str, ignore_pagination: bool = False, ) ->",
"in the response fee = Fee(ZERO) raw_network = raw_data.get('network', None)",
"len(account_ids) != 0: # and now try to get all",
"entries - KeyError due to dict entires missing an expected",
"If this is an ongoing paginating call then provide pagination_next_uri.",
"once we get an empty next_uri we are done return",
"if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account entry with",
"else: request_url = f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options) timestamp",
"Any]) -> Optional[AssetMovement]: \"\"\"Processes a single deposit/withdrawal from coinbase and",
"limit coinbase deposit/withdrawals in the requested time range # here",
"no argument in the API call if movement and movement.timestamp",
"of dict entries - KeyError due to dict entires missing",
"raw_fee = raw_network.get('transaction_fee', None) if raw_fee: # Since this is",
"trades.append(trade) return trades def _deserialize_asset_movement(self, raw_data: Dict[str, Any]) -> Optional[AssetMovement]:",
"None: self.first_connection_made = True def _validate_single_api_key_action( self, method_str: str, ignore_pagination:",
"address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except",
"pagination and this is the first query, gather all the",
"denoted in {raw_fee[\"currency\"]}', ) else: fee = deserialize_fee(raw_fee['amount']) if 'network'",
"account ids account_ids = self._get_account_ids(result) if len(account_ids) != 0: #",
"in method_str: permission = 'wallet:withdrawals:read' elif 'trades' in method_str: permission",
"raw_data['status'] != 'completed': return None payout_date = raw_data.get('payout_at', None) if",
"Timestamp, TradePair, ) from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import",
"all withdrawals of an account to see if that's possible",
"rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger",
"self, method_str: str, ignore_pagination: bool = False, ) -> Tuple[Optional[List[Any]],",
"rlk_jsonloads_dict if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__)",
"e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unknown asset '",
"wallet:deposits:read \"\"\" result, msg = self._validate_single_api_key_action('accounts') if result is None:",
"# limit coinbase trades in the requested time range here",
"during deserialization of coinbase ' f'asset_movement {raw_data}. Error was: {str(e)}',",
"raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades = []",
").hexdigest() log.debug('Coinbase API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp,",
"gather all the subsequent queries if 'pagination' in json_ret and",
"the API call if movement and movement.timestamp >= start_ts and",
"AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address,",
"movement_category = AssetMovementCategory.WITHDRAWAL # Can't see the fee being charged",
"raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in the",
"Coinbase API key is good for usage in Rotki Makes",
"= deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash')",
"results_num=len(raw_data)) trades = [] for raw_trade in raw_data: try: trade",
"as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported asset '",
"from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type,",
"with the Provided API key/secret' elif 'invalid api key' in",
"Ignoring it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found",
"query {full_url} responded with error status code: ' f'{response.status_code} and",
"= [] for account_data in accounts: if 'id' not in",
"rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If the coinbase transaction is not a",
"if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase') else: timestamp =",
"in raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in",
"time range # here since there is no argument in",
"account_ids = [] for account_data in accounts: if 'id' not",
"rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging",
"time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount,",
"is how much you get/give in quotecurrency if you buy/sell",
"permission = 'wallet:buys:read' elif 'sells' in method_str: permission = 'wallet:sells:read'",
") -> List[Trade]: account_data = self._api_query('accounts') # now get the",
"self._api_query('accounts') # now get the account ids and for each",
") except (DeserializationError, KeyError) as e: msg = str(e) if",
"e: msg = str(e) if isinstance(e, KeyError): msg = f'Missing",
"= deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset =",
"now get the account ids account_ids = self._get_account_ids(result) if len(account_ids)",
"return False, msg # now get the account ids account_ids",
"resp = self._api_query('accounts') except RemoteError as e: msg = (",
"trades return None if raw_trade['instant']: raw_time = raw_trade['created_at'] else: raw_time",
"a coinbase ' 'asset movement. Check logs for details and",
"hashlib import hmac import logging import time from json.decoder import",
"to send # crypto outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource",
"details. Ignoring it.', ) log.error( 'Error processing a coinbase account",
"raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount",
"raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) #",
"= deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset",
"response.status_code == 403: raise CoinbasePermissionError(f'API key does not have permission",
"encountered during deserialization of coinbase ' f'asset_movement {raw_data}. Error was:",
"transaction is not a trade related transaction returns None Throws:",
"from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from",
"1 unit of base currency rate = Price(native_amount / tx_amount)",
"# once we get an empty next_uri we are done",
"returns zero balances for everything # a user does not",
"and open a bug report.', ) log.error( f'Unexpected data encountered",
"else: fee = deserialize_fee(raw_fee['amount']) if 'network' in raw_data: transaction_id =",
"None: return False, msg # now get the account ids",
"If not we set ZERO fee and ignore log.error( f'In",
"f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset as e: self.msg_aggregator.add_warning(",
"Location, Price, Timestamp, TradePair, ) from rotkehlchen.user_messages import MessagesAggregator from",
"RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date,",
"Looking at coinbase's API no other type of transaction #",
"= self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys'))",
"id key. Skipping it. ', ) continue if not isinstance(account_data['id'],",
"True def _validate_single_api_key_action( self, method_str: str, ignore_pagination: bool = False,",
"validation', ) msg = ( f'Provided Coinbase API key needs",
"key is good for usage in Rotki Makes sure that",
"# and now try to get all withdrawals of an",
"# is no argument in the API call if trade",
"deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair,",
"an account to see if that's possible method = f'accounts/{account_ids[0]}/withdrawals'",
"self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[Trade]: account_data =",
"raw_data['created_at'], 'iso8601', 'coinbase', ) # Only get address/transaction id for",
"in error: return None, 'Failed to authenticate with the Provided",
"\"\"\"Performs a coinbase API Query for endpoint You can optionally",
"Dict, List, Optional, Tuple from urllib.parse import urlencode import requests",
"get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address',",
"' f'wallet:accounts:read, wallet:transactions:read, ' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read'",
"ZERO fee and ignore log.error( f'In a coinbase withdrawal of",
"# now get the account ids account_ids = self._get_account_ids(result) if",
"get all buys of an account to see if that's",
"'' def validate_api_key(self) -> Tuple[bool, str]: \"\"\"Validates that the Coinbase",
"' f'wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return None,",
"in error: return None, 'Provided API Key is invalid' else:",
"continue account_ids.append(account_data['id']) return account_ids def _api_query( self, endpoint: str, options:",
"= asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp,",
"and deserializes it Can log error/warning and return None if",
"call if movement and movement.timestamp >= start_ts and movement.timestamp <=",
"'https://api.coinbase.com' self.msg_aggregator = msg_aggregator def first_connection(self) -> None: self.first_connection_made =",
"additional_data = self._api_query( endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data",
"from an experiment of sending ETH # to an address",
"to get all withdrawals of an account to see if",
"if asset in returned_balances: amount = returned_balances[asset]['amount'] + amount else:",
"if something went wrong at deserialization \"\"\" try: if raw_data['status']",
"to see if that's possible method = f'accounts/{account_ids[0]}/sells' result, msg",
"with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue except",
"arguments to the endpoint via the options argument. If this",
"guarantee the up to the given date # API version",
"query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, #",
"if result is None: return False, msg # now get",
"paginating call then provide pagination_next_uri. If you want just the",
"here since there is no argument in the API call",
"= self._get_account_ids(result) if len(account_ids) != 0: # and now try",
"import JSONDecodeError from typing import TYPE_CHECKING, Any, Dict, List, Optional,",
"which is the way Coinbase uses to send # crypto",
"{msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase account balance. Check logs",
") msg = ( f'Provided Coinbase API key needs to",
"API key needs to have {permission} permission activated. ' f'Please",
"for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization of a",
"deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import ( ApiKey,",
"Since this is a withdrawal the fee should be the",
"{str(e)}') if response.status_code == 403: raise CoinbasePermissionError(f'API key does not",
"from rotkehlchen.constants.misc import ZERO from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset,",
"raw_time = raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time,",
"- DeserializationError due to unexpected format of dict entries -",
") # Only get address/transaction id for \"send\" type of",
"from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import",
"self).__init__('coinbase', api_key, secret, database) self.apiversion = 'v2' self.base_uri = 'https://api.coinbase.com'",
"\"\"\"Validates that the Coinbase API key is good for usage",
"_get_account_ids(self, accounts: List[Dict[str, Any]]) -> List[str]: \"\"\"Gets the account ids",
"logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]:",
"msg # and now try to get all sells of",
"one query buys/sells # Looking at coinbase's API no other",
"= 'wallet:withdrawals:read' elif 'trades' in method_str: permission = 'wallet:trades:read' #",
"movement and movement.timestamp >= start_ts and movement.timestamp <= end_ts: movements.append(movement)",
"request_url signature = hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API",
"endpoint: str, options: Optional[Dict[str, Any]] = None, pagination_next_uri: str =",
"to have {permission} permission activated. ' f'Please log into your",
"except RemoteError as e: error = str(e) if 'invalid signature'",
"text: {response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise",
"there is the network fee in the response fee =",
"not ignore_pagination: if 'next_uri' not in json_ret['pagination']: raise RemoteError('Coinbase json",
"without an id key. Skipping it. ', ) continue if",
"' f'{e.asset_name}. Ignoring it.', ) except (DeserializationError, KeyError) as e:",
"self._get_account_ids(result) if len(account_ids) != 0: # and now try to",
"permission = 'wallet:transactions:read' elif 'buys' in method_str: permission = 'wallet:buys:read'",
"= pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if options: request_url +=",
"code: ' f'{response.status_code} and text: {response.text}', ) try: json_ret =",
"processing a coinbase trade. Check logs ' 'for details. Ignoring",
"message = timestamp + request_verb + request_url signature = hmac.new(",
"log.error( 'Error processing a coinbase trade', trade=raw_trade, error=msg, ) continue",
"'Unexpected data encountered during deserialization of a coinbase ' 'asset",
"RemoteError(f'Coinbase API request failed due to {str(e)}') if response.status_code ==",
"a coinbase account balance. Check logs ' 'for details. Ignoring",
"to the endpoint via the options argument. If this is",
"logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str, Any])",
"KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Error",
"self.session.get(full_url) except requests.exceptions.RequestException as e: raise RemoteError(f'Coinbase API request failed",
"method_str: permission = 'wallet:trades:read' # the accounts elif should be",
"elif 'invalid api key' in error: return None, 'Provided API",
"appear in the docs but from an experiment of sending",
"empty next_uri we are done return final_data additional_data = self._api_query(",
"{endpoint}') if response.status_code != 200: raise RemoteError( f'Coinbase query {full_url}",
"continue asset = asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError",
"end_ts: Timestamp, ) -> List[AssetMovement]: account_data = self._api_query('accounts') account_ids =",
"no other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something",
"outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non \"send\"",
"= f'/{self.apiversion}/{endpoint}' if options: request_url += urlencode(options) timestamp = str(int(time.time()))",
"e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown asset ' f'{e.asset_name}.",
"wallet:accounts:read, wallet:transactions:read, wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\" result, msg =",
"f'Unexpected data encountered during deserialization of coinbase ' f'asset_movement {raw_data}.",
"- UnknownAsset due to Asset instantiation - DeserializationError due to",
"entry \"\"\" if raw_trade['status'] != 'completed': # We only want",
"due ' 'to {}'.format(e) ) log.error(msg) return None, msg returned_balances:",
"# https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non \"send\" type found in coinbase",
"next_uri we are done return final_data additional_data = self._api_query( endpoint=endpoint,",
"json_ret and not pagination_next_uri and not ignore_pagination: if 'next_uri' not",
"the account ids account_ids = self._get_account_ids(result) if len(account_ids) != 0:",
"as the moved asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): #",
"coinbase account and set all required permissions: ' f'wallet:accounts:read, wallet:transactions:read,",
"try: if raw_data['status'] != 'completed': return None payout_date = raw_data.get('payout_at',",
"there is no argument in the API call if movement",
"<reponame>vnavascues/rotki import hashlib import hmac import logging import time from",
"= f'Missing key entry for {msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered",
"for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase account balance. Check",
"for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals')) # also get transactions",
"own if amount == ZERO: continue asset = asset_from_coinbase(account['balance']['currency']) try:",
") class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__( self, api_key:",
"error = str(e) if 'transactions' in method_str: permission = 'wallet:transactions:read'",
"call if trade and trade.timestamp >= start_ts and trade.timestamp <=",
"now get the account ids and for each one query",
"request_verb + request_url signature = hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest()",
"pagination_next_uri: request_url = pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}' if options:",
"then provide pagination_next_uri. If you want just the first results",
"-> Tuple[Optional[List[Any]], str]: try: result = self._api_query(method_str, ignore_pagination=ignore_pagination) except CoinbasePermissionError",
"the following permissions are given to the key: wallet:accounts:read, wallet:transactions:read,",
"usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error( f'Error processing",
"now try to get all withdrawals of an account to",
"coinbase trade with unsupported asset ' f'{e.asset_name}. Ignoring it.', )",
"other endpoints elif 'accounts' in method_str: permission = 'wallet:accounts:read' else:",
"address/transaction id for \"send\" type of transactions address = None",
"asset in returned_balances: amount = returned_balances[asset]['amount'] + amount else: returned_balances[asset]",
"it.', ) continue except (DeserializationError, KeyError) as e: msg =",
"= str(e) if isinstance(e, KeyError): msg = f'Missing key entry",
"UnknownAsset due to Asset instantiation - DeserializationError due to unexpected",
"{method_str} at API key validation', ) msg = ( f'Provided",
"missing an expected entry \"\"\" if raw_trade['status'] != 'completed': #",
"next_uri: # As per the docs: https://developers.coinbase.com/api/v2?python#pagination # once we",
"link=str(raw_data['id']), ) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal",
"Key is invalid' else: # any other remote error return",
"an account to see if that's possible method = f'accounts/{account_ids[0]}/buys'",
"deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear in",
"send Crypto out of the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for",
"f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method) if result is None: return",
"unsupported asset ' f'{e.asset_name}. Ignoring it.', ) except (DeserializationError, KeyError)",
"asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset as e:",
"is the # way to send Crypto out of the",
"account entry with a non string id: ' f'{account_data[\"id\"]}. Skipping",
"return returned_balances, '' def query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp,",
"format of dict entries - KeyError due to dict entires",
"get all transactions of an account to see if that's",
"the \"send\" resource amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)",
"empty balances. Coinbase returns zero balances for everything # a",
"in raw_data: # Then this should be a \"send\" which",
"that Rotkehlchen would need to return in query_trade_history account_ids =",
"in method_str: permission = 'wallet:transactions:read' elif 'buys' in method_str: permission",
"it.', ) log.error( 'Error processing a coinbase trade', trade=raw_trade, error=msg,",
"error: return None, 'Provided API Key is invalid' else: #",
"payout_date = raw_data.get('payout_at', None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601',",
"log.error( 'Error processing a coinbase account balance', account_balance=account, error=msg, )",
"deposit/withdrawals in the requested time range # here since there",
"f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found",
"'network' in raw_data: transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to' in",
"\"GET\" if pagination_next_uri: request_url = pagination_next_uri else: request_url = f'/{self.apiversion}/{endpoint}'",
"to send Crypto out of the exchange txs = self._api_query(f'accounts/{account_id}/transactions')",
"KeyError) as e: msg = str(e) if isinstance(e, KeyError): msg",
"all buys of an account to see if that's possible",
"= asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear in the docs",
"returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value except UnknownAsset as e:",
"movement = self._deserialize_asset_movement(raw_movement) # limit coinbase deposit/withdrawals in the requested",
"since the word appears # in other endpoints elif 'accounts'",
"log.error(msg) return None, msg returned_balances: Dict[Asset, Dict[str, Any]] = {}",
"time=timestamp) # in coinbase you are buying/selling tx_asset for native_asset",
"@cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp",
"and not pagination_next_uri and not ignore_pagination: if 'next_uri' not in",
"'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result', results_num=len(raw_data)) movements = []",
"query, gather all the subsequent queries if 'pagination' in json_ret",
"= Fee(ZERO) raw_network = raw_data.get('network', None) if raw_network: raw_fee =",
"!= 0: # and now try to get all transactions",
"msg = ( f'Provided Coinbase API key needs to have",
"to inability to ' f'query USD price: {str(e)}. Skipping balance",
"amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset as e: self.msg_aggregator.add_warning(",
"trade.timestamp >= start_ts and trade.timestamp <= end_ts: trades.append(trade) return trades",
"deserialization of a coinbase ' 'asset movement. Check logs for",
"api_key: ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ): super(Coinbase,",
"\"\"\"Processes a single deposit/withdrawal from coinbase and deserializes it Can",
"in Coinbase is the # way to send Crypto out",
"was: {str(e)}', ) return None def query_online_deposits_withdrawals( self, start_ts: Timestamp,",
"and now try to get all transactions of an account",
"entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase account balance.",
"balance entry', ) continue if asset in returned_balances: amount =",
"else: raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase') trade_type",
"asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we",
"asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear in the docs but",
"= returned_balances[asset]['amount'] + amount else: returned_balances[asset] = {} returned_balances[asset]['amount'] =",
"'asset movement. Check logs for details and open a bug",
"continue if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals history result',",
"msg returned_balances: Dict[Asset, Dict[str, Any]] = {} for account in",
"a bug report.', ) log.error( f'Unexpected data encountered during deserialization",
"# limit coinbase deposit/withdrawals in the requested time range #",
"optionally provide extra arguments to the endpoint via the options",
"much you get/give in quotecurrency if you buy/sell 1 unit",
"if 'type' in raw_data: # Then this should be a",
"location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']),",
"= deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount =",
"raw_trade['created_at'] else: raw_time = raw_trade['payout_at'] timestamp = deserialize_timestamp_from_date(raw_time, 'iso8601', 'coinbase')",
"if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we set",
"import RotkehlchenLogsAdapter from rotkehlchen.serialization.deserialize import ( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee,",
"version response. 'CB-VERSION': '2019-08-25', }) full_url = self.base_uri + request_url",
"wallet:sells:read, wallet:withdrawals:read, ' f'wallet:deposits:read, wallet:trades:read' ) return None, msg except",
"https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non \"send\" type found in coinbase deposit/withdrawal",
"raw_data: # Then this should be a \"send\" which is",
"json.decoder import JSONDecodeError from typing import TYPE_CHECKING, Any, Dict, List,",
"deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset = asset_from_coinbase(raw_trade['amount']['currency'], time=timestamp) native_amount = deserialize_asset_amount(raw_trade['subtotal']['amount']) native_asset =",
"coinbase deposit/withdrawal processing' assert raw_data['type'] == 'send', msg movement_category =",
"RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from rotkehlchen.exchanges.exchange",
"crypto outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non",
"# in other endpoints elif 'accounts' in method_str: permission =",
"ongoing paginating call then provide pagination_next_uri. If you want just",
"Check logs ' 'for details. Ignoring it.', ) log.error( 'Error",
"import DBHandler logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade:",
"deserialize_asset_amount(account['balance']['amount']) # ignore empty balances. Coinbase returns zero balances for",
"Any]]], str]: try: resp = self._api_query('accounts') except RemoteError as e:",
"MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict",
"Tuple from urllib.parse import urlencode import requests from rotkehlchen.assets.asset import",
"= raw_data.get('payout_at', None) if payout_date: timestamp = deserialize_timestamp_from_date(payout_date, 'iso8601', 'coinbase')",
"continue if not isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account entry",
"None if something went wrong at deserialization \"\"\" try: if",
"API key validation', ) msg = ( f'Provided Coinbase API",
"data: {response.text}') final_data = json_ret['data'] # If we got pagination",
"of sending ETH # to an address from coinbase there",
"asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate,",
"see the fee being charged from the \"send\" resource amount",
"endpoint=endpoint, options=options, pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def",
"timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), ) except UnknownAsset as",
"deserializes it Can log error/warning and return None if something",
"we set ZERO fee and ignore log.error( f'In a coinbase",
"in tx: continue if tx['type'] == 'send': raw_data.append(tx) log.debug('coinbase deposits/withdrawals",
"pagination_next_uri and not ignore_pagination: if 'next_uri' not in json_ret['pagination']: raise",
"= str(int(time.time())) message = timestamp + request_verb + request_url signature",
"# in coinbase you are buying/selling tx_asset for native_asset pair",
"{full_url} responded with error status code: ' f'{response.status_code} and text:",
"return AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset,",
"non string id: ' f'{account_data[\"id\"]}. Skipping it. ', ) continue",
"import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import",
"f'Provided Coinbase API key needs to have {permission} permission activated.",
"ignore_pagination=ignore_pagination) except CoinbasePermissionError as e: error = str(e) if 'transactions'",
"due to Asset instantiation - DeserializationError due to unexpected format",
"f'Found coinbase deposit/withdrawal with unknown asset ' f'{e.asset_name}. Ignoring it.',",
"account to see if that's possible method = f'accounts/{account_ids[0]}/deposits' result,",
"msg = self._validate_single_api_key_action('accounts') if result is None: return False, msg",
"str = None, ignore_pagination: bool = False, ) -> List[Any]:",
"asset = asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as",
"Fee, Location, Price, Timestamp, TradePair, ) from rotkehlchen.user_messages import MessagesAggregator",
"ignore log.error( f'In a coinbase withdrawal of {asset.identifier} the fee'",
"fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return Trade( timestamp=timestamp,",
"!= asset_from_coinbase(raw_fee['currency'], time=timestamp): # If not we set ZERO fee",
"-> None: self.first_connection_made = True def _validate_single_api_key_action( self, method_str: str,",
"amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount']) asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp)",
"the moved asset if asset != asset_from_coinbase(raw_fee['currency'], time=timestamp): # If",
"def query_online_trade_history( self, start_ts: Timestamp, end_ts: Timestamp, ) -> List[Trade]:",
"= f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if result is None:",
"the \"sends\", which in Coinbase is the # way to",
"e: self.msg_aggregator.add_error( f'Error processing coinbase balance entry due to inability",
"the first query, gather all the subsequent queries if 'pagination'",
"to deal with completed trades return None if raw_trade['instant']: raw_time",
"f'Found coinbase transaction with unknown asset ' f'{e.asset_name}. Ignoring it.',",
"{response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase",
"sending ETH # to an address from coinbase there is",
"CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__( self, api_key: ApiKey, secret:",
"a user does not own if amount == ZERO: continue",
"see if that's possible method = f'accounts/{account_ids[0]}/sells' result, msg =",
"options: request_url += urlencode(options) timestamp = str(int(time.time())) message = timestamp",
"fee=fee_amount, fee_currency=fee_asset, link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def",
"in other endpoints elif 'accounts' in method_str: permission = 'wallet:accounts:read'",
"an empty next_uri we are done return final_data additional_data =",
"first results then set ignore_pagination to True. \"\"\" request_verb =",
"raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase",
"= asset_from_coinbase(account['balance']['currency']) try: usd_price = Inquirer().find_usd_price(asset=asset) except RemoteError as e:",
"timestamp = str(int(time.time())) message = timestamp + request_verb + request_url",
"deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing import (",
"f'{account_data[\"id\"]}. Skipping it. ', ) continue account_ids.append(account_data['id']) return account_ids def",
"- KeyError due to dict entires missing an expected entry",
"-> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try: resp = self._api_query('accounts') except",
"try: if not account['balance']: continue amount = deserialize_asset_amount(account['balance']['amount']) # ignore",
"coinbase account entry with a non string id: ' f'{account_data[\"id\"]}.",
"= hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query', request_url=request_url)",
"request_verb = \"GET\" if pagination_next_uri: request_url = pagination_next_uri else: request_url",
"{response.text}') if 'data' not in json_ret: raise RemoteError(f'Coinbase json response",
"to authenticate with the Provided API key/secret' elif 'invalid api",
"None: return False, msg # and now try to get",
"if TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__) log",
"( deserialize_asset_amount, deserialize_asset_amount_force_positive, deserialize_asset_movement_category, deserialize_fee, deserialize_timestamp_from_date, deserialize_trade_type, ) from rotkehlchen.typing",
"movement.timestamp >= start_ts and movement.timestamp <= end_ts: movements.append(movement) return movements",
"returned invalid JSON response: {response.text}') if 'data' not in json_ret:",
"an account to see if that's possible method = f'accounts/{account_ids[0]}/sells'",
"balance result with unsupported asset ' f'{e.asset_name}. Ignoring it.', )",
"asset = asset_from_coinbase(raw_data['amount']['currency'], time=timestamp) # Fees dont appear in the",
"# a user does not own if amount == ZERO:",
"None, msg except RemoteError as e: error = str(e) if",
"sells of an account to see if that's possible method",
"@protect_with_lock() @cache_response_timewise() def query_balances(self) -> Tuple[Optional[Dict[Asset, Dict[str, Any]]], str]: try:",
"' f'{account_data[\"id\"]}. Skipping it. ', ) continue account_ids.append(account_data['id']) return account_ids",
"json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON",
"signature = hmac.new( self.secret, message.encode(), hashlib.sha256, ).hexdigest() log.debug('Coinbase API query',",
"= self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if 'type' not in",
"for account_data in accounts: if 'id' not in account_data: self.msg_aggregator.add_error(",
"try: trade = trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found",
") continue except (DeserializationError, KeyError) as e: msg = str(e)",
"'Error processing a coinbase account balance. Check logs ' 'for",
"wrong at deserialization \"\"\" try: if raw_data['status'] != 'completed': return",
"uses to send # crypto outside of the exchange #",
"you want just the first results then set ignore_pagination to",
"except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with",
"queries if 'pagination' in json_ret and not pagination_next_uri and not",
"rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement,",
"rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON response: {response.text}')",
"account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data = [] for",
"activated. ' f'Please log into your coinbase account and set",
"account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells')) log.debug('coinbase buys/sells history result', results_num=len(raw_data)) trades =",
"not own if amount == ZERO: continue asset = asset_from_coinbase(account['balance']['currency'])",
"time=timestamp): # If not we set ZERO fee and ignore",
"self, endpoint: str, options: Optional[Dict[str, Any]] = None, pagination_next_uri: str",
"needs to have {permission} permission activated. ' f'Please log into",
"at deserialization \"\"\" try: if raw_data['status'] != 'completed': return None",
"data encountered during deserialization of coinbase ' f'asset_movement {raw_data}. Error",
"[] for raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement) # limit",
"= None, ignore_pagination: bool = False, ) -> List[Any]: \"\"\"Performs",
"wallet:trades:read' ) return None, msg except RemoteError as e: error",
"USD price: {str(e)}. Skipping balance entry', ) continue if asset",
"TradePair, ) from rotkehlchen.user_messages import MessagesAggregator from rotkehlchen.utils.interfaces import cache_response_timewise,",
"unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue except UnsupportedAsset",
"trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase transaction with",
"= amount usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value",
"assert raw_data['type'] == 'send', msg movement_category = AssetMovementCategory.WITHDRAWAL # Can't",
"'completed': return None payout_date = raw_data.get('payout_at', None) if payout_date: timestamp",
"def trade_from_coinbase(raw_trade: Dict[str, Any]) -> Optional[Trade]: \"\"\"Turns a coinbase transaction",
"is the way Coinbase uses to send # crypto outside",
"DeserializationError due to unexpected format of dict entries - KeyError",
"is None: return False, msg return True, '' def _get_account_ids(self,",
"that's possible method = f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method) if",
"List[Trade]: account_data = self._api_query('accounts') # now get the account ids",
"in query_trade_history account_ids = self._get_account_ids(account_data) raw_data = [] for account_id",
"type found in coinbase deposit/withdrawal processing' assert raw_data['type'] == 'send',",
"import ZERO from rotkehlchen.errors import DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from",
"API no other type of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes",
"elif 'buys' in method_str: permission = 'wallet:buys:read' elif 'sells' in",
"The rate is how much you get/give in quotecurrency if",
"transaction_id = get_key_if_has_val(raw_data['network'], 'hash') if 'to' in raw_data: address =",
"CoinbasePermissionError as e: error = str(e) if 'transactions' in method_str:",
"' f'Please log into your coinbase account and set all",
"send # crypto outside of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg",
"{response.text}') final_data = json_ret['data'] # If we got pagination and",
"of a coinbase ' 'asset movement. Check logs for details",
"endpoint You can optionally provide extra arguments to the endpoint",
"False, msg # and now try to get all sells",
"API query', request_url=request_url) self.session.headers.update({ 'CB-ACCESS-SIGN': signature, 'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key,",
"trade = trade_from_coinbase(raw_trade) except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase",
"f'{response.status_code} and text: {response.text}', ) try: json_ret = rlk_jsonloads_dict(response.text) except",
"elif 'deposits' in method_str: permission = 'wallet:deposits:read' elif 'withdrawals' in",
"else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee = deserialize_fee(raw_data['fee']['amount'])",
"and this is the first query, gather all the subsequent",
"a coinbase API Query for endpoint You can optionally provide",
"self.msg_aggregator = msg_aggregator def first_connection(self) -> None: self.first_connection_made = True",
"method_str: permission = 'wallet:transactions:read' elif 'buys' in method_str: permission =",
"' f'{e.asset_name}. Ignoring it.', ) except UnsupportedAsset as e: self.msg_aggregator.add_warning(",
"= rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned invalid JSON response:",
"import time from json.decoder import JSONDecodeError from typing import TYPE_CHECKING,",
"DeserializationError, RemoteError, UnknownAsset, UnsupportedAsset from rotkehlchen.exchanges.data_structures import AssetMovement, Trade from",
"processing a coinbase trade', trade=raw_trade, error=msg, ) continue # limit",
"DBHandler logger = logging.getLogger(__name__) log = RotkehlchenLogsAdapter(logger) def trade_from_coinbase(raw_trade: Dict[str,",
"str(e) if isinstance(e, KeyError): msg = f'Missing key entry for",
"get the \"sends\", which in Coinbase is the # way",
"of transaction # https://developers.coinbase.com/api/v2?python#list-transactions # consitutes something that Rotkehlchen would",
"the options argument. If this is an ongoing paginating call",
"in raw_data: address = deserialize_asset_movement_address(raw_data['to'], 'address', asset) else: movement_category =",
"/ tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount']) fee_asset = asset_from_coinbase(raw_trade['fee']['currency'], time=timestamp) return",
"get_key_if_has_val from rotkehlchen.inquirer import Inquirer from rotkehlchen.logging import RotkehlchenLogsAdapter from",
"coinbase trade. Check logs ' 'for details. Ignoring it.', )",
"'address', asset) else: movement_category = deserialize_asset_movement_category(raw_data['resource']) amount = deserialize_asset_amount_force_positive(raw_data['amount']['amount']) fee",
"can optionally provide extra arguments to the endpoint via the",
"method_str: permission = 'wallet:withdrawals:read' elif 'trades' in method_str: permission =",
"\"send\" which is the way Coinbase uses to send #",
"isinstance(e, KeyError): msg = f'Missing key entry for {msg}.' self.msg_aggregator.add_error(",
"= 'wallet:transactions:read' elif 'buys' in method_str: permission = 'wallet:buys:read' elif",
"AssertionError( f'Unexpected coinbase method {method_str} at API key validation', )",
"we are done return final_data additional_data = self._api_query( endpoint=endpoint, options=options,",
"Any]]) -> List[str]: \"\"\"Gets the account ids out of the",
"self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/buys')) raw_data.extend(self._api_query(f'accounts/{account_id}/sells'))",
"rotkehlchen.typing import ( ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp,",
"for everything # a user does not own if amount",
"[] for account_data in accounts: if 'id' not in account_data:",
"fee should be the same as the moved asset if",
"asset ' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError)",
"= [] for raw_movement in raw_data: movement = self._deserialize_asset_movement(raw_movement) #",
"else: timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) # Only",
"error return result, '' def validate_api_key(self) -> Tuple[bool, str]: \"\"\"Validates",
"that's possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg = self._validate_single_api_key_action(method) if",
"permission = 'wallet:deposits:read' elif 'withdrawals' in method_str: permission = 'wallet:withdrawals:read'",
"error status code: ' f'{response.status_code} and text: {response.text}', ) try:",
"if 'pagination' in json_ret and not pagination_next_uri and not ignore_pagination:",
"= {} for account in resp: try: if not account['balance']:",
"an address from coinbase there is the network fee in",
"'iso8601', 'coinbase') trade_type = deserialize_trade_type(raw_trade['resource']) tx_amount = deserialize_asset_amount(raw_trade['amount']['amount']) tx_asset =",
"ApiKey, ApiSecret, AssetMovementCategory, Fee, Location, Price, Timestamp, TradePair, ) from",
"'CB-ACCESS-TIMESTAMP': timestamp, 'CB-ACCESS-KEY': self.api_key, # This is needed to guarantee",
"False, ) -> List[Any]: \"\"\"Performs a coinbase API Query for",
"next_uri = json_ret['pagination']['next_uri'] if not next_uri: # As per the",
"an account to see if that's possible method = f'accounts/{account_ids[0]}/transactions'",
") -> List[AssetMovement]: account_data = self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data",
"wallet:buys:read, wallet:sells:read, wallet:withdrawals:read, wallet:deposits:read \"\"\" result, msg = self._validate_single_api_key_action('accounts') if",
"+ request_url try: response = self.session.get(full_url) except requests.exceptions.RequestException as e:",
"responded with error status code: ' f'{response.status_code} and text: {response.text}',",
"return None def query_online_deposits_withdrawals( self, start_ts: Timestamp, end_ts: Timestamp, )",
"txs: if 'type' not in tx: continue if tx['type'] ==",
"first_connection(self) -> None: self.first_connection_made = True def _validate_single_api_key_action( self, method_str:",
"to get all sells of an account to see if",
"e: self.msg_aggregator.add_warning( f'Found coinbase trade with unsupported asset ' f'{e.asset_name}.",
"e: msg = ( 'Coinbase API request failed. Could not",
"UnsupportedAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance result with unsupported",
"asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you are buying/selling tx_asset for",
"import cache_response_timewise, protect_with_lock from rotkehlchen.utils.serialization import rlk_jsonloads_dict if TYPE_CHECKING: from",
"method {method_str} at API key validation', ) msg = (",
"urllib.parse import urlencode import requests from rotkehlchen.assets.asset import Asset from",
"Timestamp, end_ts: Timestamp, ) -> List[AssetMovement]: account_data = self._api_query('accounts') account_ids",
"= 'wallet:buys:read' elif 'sells' in method_str: permission = 'wallet:sells:read' elif",
"is no argument in the API call if trade and",
"the endpoint via the options argument. If this is an",
"Skipping it. ', ) continue account_ids.append(account_data['id']) return account_ids def _api_query(",
"pagination_next_uri=next_uri, ) final_data.extend(additional_data) return final_data @protect_with_lock() @cache_response_timewise() def query_balances(self) ->",
"TYPE_CHECKING: from rotkehlchen.db.dbhandler import DBHandler logger = logging.getLogger(__name__) log =",
"given date # API version response. 'CB-VERSION': '2019-08-25', }) full_url",
"result is None: return False, msg return True, '' def",
"RemoteError('Coinbase json response contained no \"next_uri\" key') next_uri = json_ret['pagination']['next_uri']",
"e: error = str(e) if 'transactions' in method_str: permission =",
"amount usd_value = returned_balances[asset]['amount'] * usd_price returned_balances[asset]['usd_value'] = usd_value except",
"deposits/withdrawals history result', results_num=len(raw_data)) movements = [] for raw_movement in",
"be at the end since the word appears # in",
"the given date # API version response. 'CB-VERSION': '2019-08-25', })",
"to see if that's possible method = f'accounts/{account_ids[0]}/withdrawals' result, msg",
"from rotkehlchen.assets.asset import Asset from rotkehlchen.assets.converters import asset_from_coinbase from rotkehlchen.constants.misc",
"Fee(ZERO) raw_network = raw_data.get('network', None) if raw_network: raw_fee = raw_network.get('transaction_fee',",
"of {asset.identifier} the fee' f'is denoted in {raw_fee[\"currency\"]}', ) else:",
"= \"GET\" if pagination_next_uri: request_url = pagination_next_uri else: request_url =",
"{msg}.' self.msg_aggregator.add_error( 'Unexpected data encountered during deserialization of a coinbase",
"category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee, link=str(raw_data['id']), )",
"call then provide pagination_next_uri. If you want just the first",
"if 'invalid signature' in error: return None, 'Failed to authenticate",
"coinbase and deserializes it Can log error/warning and return None",
"to get all transactions of an account to see if",
"requested time range here since there # is no argument",
"following permissions are given to the key: wallet:accounts:read, wallet:transactions:read, wallet:buys:read,",
"details. Ignoring it.', ) log.error( 'Error processing a coinbase trade',",
"' f'{e.asset_name}. Ignoring it.', ) continue except (DeserializationError, KeyError) as",
"key entry for {msg}.' self.msg_aggregator.add_error( 'Error processing a coinbase trade.",
"base currency rate = Price(native_amount / tx_amount) fee_amount = deserialize_fee(raw_trade['fee']['amount'])",
"coinbase trades in the requested time range here since there",
"is the first query, gather all the subsequent queries if",
"to get all buys of an account to see if",
"'Failed to authenticate with the Provided API key/secret' elif 'invalid",
"import AssetMovement, Trade from rotkehlchen.exchanges.exchange import ExchangeInterface from rotkehlchen.exchanges.utils import",
"f'Coinbase query {full_url} responded with error status code: ' f'{response.status_code}",
"docs but from an experiment of sending ETH # to",
"full_url = self.base_uri + request_url try: response = self.session.get(full_url) except",
"raw_trade['status'] != 'completed': # We only want to deal with",
"encountered during deserialization of a coinbase ' 'asset movement. Check",
"the exchange txs = self._api_query(f'accounts/{account_id}/transactions') for tx in txs: if",
"isinstance(account_data['id'], str): self.msg_aggregator.add_error( f'Found coinbase account entry with a non",
"account balance', account_balance=account, error=msg, ) continue return returned_balances, '' def",
"self._get_account_ids(account_data) raw_data = [] for account_id in account_ids: raw_data.extend(self._api_query(f'accounts/{account_id}/deposits')) raw_data.extend(self._api_query(f'accounts/{account_id}/withdrawals'))",
"import ExchangeInterface from rotkehlchen.exchanges.utils import deserialize_asset_movement_address, get_key_if_has_val from rotkehlchen.inquirer import",
"result with unknown asset ' f'{e.asset_name}. Ignoring it.', ) continue",
"= 'wallet:accounts:read' else: raise AssertionError( f'Unexpected coinbase method {method_str} at",
"invalid' else: # any other remote error return None, error",
"if that's possible method = f'accounts/{account_ids[0]}/sells' result, msg = self._validate_single_api_key_action(method)",
"bool = False, ) -> List[Any]: \"\"\"Performs a coinbase API",
"to True. \"\"\" request_verb = \"GET\" if pagination_next_uri: request_url =",
"way Coinbase uses to send # crypto outside of the",
"f'wallet:deposits:read, wallet:trades:read' ) return None, msg except RemoteError as e:",
"usage in Rotki Makes sure that the following permissions are",
"of transactions address = None transaction_id = None # movement_category:",
"= usd_value except UnknownAsset as e: self.msg_aggregator.add_warning( f'Found coinbase balance",
"'wallet:transactions:read' elif 'buys' in method_str: permission = 'wallet:buys:read' elif 'sells'",
"try to get all withdrawals of an account to see",
"went wrong at deserialization \"\"\" try: if raw_data['status'] != 'completed':",
"None Throws: - UnknownAsset due to Asset instantiation - DeserializationError",
"timestamp = deserialize_timestamp_from_date( raw_data['created_at'], 'iso8601', 'coinbase', ) # Only get",
"link=str(raw_trade['id']), ) class CoinbasePermissionError(Exception): pass class Coinbase(ExchangeInterface): def __init__( self,",
"response. 'CB-VERSION': '2019-08-25', }) full_url = self.base_uri + request_url try:",
"endpoints elif 'accounts' in method_str: permission = 'wallet:accounts:read' else: raise",
"set ZERO fee and ignore log.error( f'In a coinbase withdrawal",
"ids and for each one query buys/sells # Looking at",
") log.error(msg) return None, msg returned_balances: Dict[Asset, Dict[str, Any]] =",
"= self._api_query('accounts') account_ids = self._get_account_ids(account_data) raw_data = [] for account_id",
"the fee' f'is denoted in {raw_fee[\"currency\"]}', ) else: fee =",
"Error was: {str(e)}', ) return None def query_online_deposits_withdrawals( self, start_ts:",
"time=timestamp) return Trade( timestamp=timestamp, location=Location.COINBASE, pair=pair, trade_type=trade_type, amount=amount, rate=rate, fee=fee_amount,",
"= asset_from_coinbase(raw_trade['subtotal']['currency'], time=timestamp) # in coinbase you are buying/selling tx_asset",
"get/give in quotecurrency if you buy/sell 1 unit of base",
"AssetMovement( location=Location.COINBASE, category=movement_category, address=address, transaction_id=transaction_id, timestamp=timestamp, asset=asset, amount=amount, fee_asset=asset, fee=fee,",
"of the exchange # https://developers.coinbase.com/api/v2?python#transaction-resource msg = 'Non \"send\" type",
"\"\"\"Turns a coinbase transaction into a rotkehlchen Trade. https://developers.coinbase.com/api/v2?python#buys If",
"elif 'sells' in method_str: permission = 'wallet:sells:read' elif 'deposits' in",
"= Inquirer().find_usd_price(asset=asset) except RemoteError as e: self.msg_aggregator.add_error( f'Error processing coinbase",
"def first_connection(self) -> None: self.first_connection_made = True def _validate_single_api_key_action( self,",
"str]: try: resp = self._api_query('accounts') except RemoteError as e: msg",
"the API call if trade and trade.timestamp >= start_ts and",
"due to dict entires missing an expected entry \"\"\" if",
"you are buying/selling tx_asset for native_asset pair = TradePair(f'{tx_asset.identifier}_{native_asset.identifier}') amount",
"self, api_key: ApiKey, secret: ApiSecret, database: 'DBHandler', msg_aggregator: MessagesAggregator, ):",
"self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unsupported asset ' f'{e.asset_name}. Ignoring",
"if that's possible method = f'accounts/{account_ids[0]}/transactions' result, msg = self._validate_single_api_key_action(method)",
") try: json_ret = rlk_jsonloads_dict(response.text) except JSONDecodeError: raise RemoteError(f'Coinbase returned",
"self.msg_aggregator.add_warning( f'Found coinbase deposit/withdrawal with unknown asset ' f'{e.asset_name}. Ignoring"
] |
[
"AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType =",
"v in tv[1]] for tv in deref_vals or [] }",
"= '1.3.6.1.4.1.4203.666.5.16' # Request types #--------------------------------------------------------------------------- # For compatibility with",
"), ) class DerefSpecs(univ.SequenceOf): componentType = DerefSpec() # Response types",
"tv in deref_vals or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError:",
") class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()),",
"__all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls from ldap.controls",
"try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)] KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] = DereferenceControl",
"LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber import",
"-*- coding: utf-8 -*- \"\"\" ldap.controls.deref - classes for (see",
"self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes = AttributeList() for j in",
"from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag",
"componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), )",
"ldap.controls.deref - classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project",
"= AttributeValue() class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals',",
"tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes(",
"= DerefSpec() deref_attributes = AttributeList() for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j])",
"DerefSpec() deref_attributes = AttributeList() for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr))",
"pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251 import",
"[ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS",
"https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project details. \"\"\" __all__ = [",
"utf-8 -*- \"\"\" ldap.controls.deref - classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See",
"for project details. \"\"\" __all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl', ]",
"= DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or {}",
"DerefSpecs() i = 0 for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec =",
"{ str(tv[0]): [str(v) for v in tv[1]] for tv in",
"for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project details. \"\"\" __all__",
"ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type import",
"def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or {} def _derefSpecs(self):",
"{} for deref_res in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict =",
"See https://www.python-ldap.org/ for project details. \"\"\" __all__ = [ 'DEREF_CONTROL_OID',",
"componentType = AttributeValue() class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()),",
"for deref_res in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = {",
"(see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project details. \"\"\" __all__ =",
"details. \"\"\" __all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls",
"AttributeValue() class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()),",
"'1.3.6.1.4.1.4203.666.5.16' # Request types #--------------------------------------------------------------------------- # For compatibility with ASN.1",
"deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1 return deref_specs def",
"-*- \"\"\" ldap.controls.deref - classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/",
"= PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType",
"import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request types #--------------------------------------------------------------------------- #",
"= { str(tv[0]): [str(v) for v in tv[1]] for tv",
"class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs =",
"PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType =",
"DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes',",
"controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or",
"class AttributeValues(univ.SetOf): componentType = AttributeValue() class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes(",
") class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0)",
"\"\"\" __all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls from",
"declaration in I-D AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence): componentType =",
"in I-D AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes(",
"AttributeDescriptionList class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ),",
"for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i +=",
"class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), )",
"i += 1 return deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def",
"from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request types",
"<gh_stars>1-10 # -*- coding: utf-8 -*- \"\"\" ldap.controls.deref - classes",
"'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import",
"for tv in deref_vals or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except",
"AttributeValues(univ.SetOf): componentType = AttributeValue() class PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type',",
"DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs",
"# Request types #--------------------------------------------------------------------------- # For compatibility with ASN.1 declaration",
"DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or {} def",
"range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1 return deref_specs",
"class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType(",
"namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class DereferenceControl(LDAPControl):",
"j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1",
"compatibility with ASN.1 declaration in I-D AttributeList = AttributeDescriptionList class",
"deref_res in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]):",
"or {} def _derefSpecs(self): deref_specs = DerefSpecs() i = 0",
") class DerefSpecs(univ.SequenceOf): componentType = DerefSpec() # Response types #---------------------------------------------------------------------------",
"deref_vals or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)] =",
"0 for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes =",
"#--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType = AttributeValue() class PartialAttribute(univ.Sequence): componentType =",
"LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request types #--------------------------------------------------------------------------- # For",
"= namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList() ),",
"in self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes = AttributeList() for j",
"deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]): [str(v) for v in tv[1]]",
"[] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)] KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType]",
"I-D AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType(",
"ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag from",
"decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for deref_res in",
"for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes = AttributeList()",
"#--------------------------------------------------------------------------- # For compatibility with ASN.1 declaration in I-D AttributeList",
"deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1 return deref_specs def encodeControlValue(self): return",
"namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID",
"self.derefRes = {} for deref_res in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2]",
"= 0 for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes",
"deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue())",
"types #--------------------------------------------------------------------------- # For compatibility with ASN.1 declaration in I-D",
"[str(v) for v in tv[1]] for tv in deref_vals or",
"import pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder",
"LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class",
"def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes",
"'DereferenceControl', ] import ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251",
"DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()),",
"componentType = DerefRes() class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None):",
"project details. \"\"\" __all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import",
"tv[1]] for tv in deref_vals or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict))",
"componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList()",
"classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project details. \"\"\"",
"def _derefSpecs(self): deref_specs = DerefSpecs() i = 0 for deref_attr,deref_attribute_names",
"Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType = AttributeValue() class PartialAttribute(univ.Sequence):",
"pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder from",
"_derefSpecs(self): deref_specs = DerefSpecs() i = 0 for deref_attr,deref_attribute_names in",
"deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1 return deref_specs def encodeControlValue(self):",
"), namedtype.NamedType( 'attributes', AttributeList() ), ) class DerefSpecs(univ.SequenceOf): componentType =",
"univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()),",
"decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]): [str(v) for",
"= deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]): [str(v) for v in",
"for v in tv[1]] for tv in deref_vals or []",
"namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList() ), ) class",
"namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes()",
"] import ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from",
"i = 0 for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec = DerefSpec()",
"import namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue",
"deref_spec = DerefSpec() deref_attributes = AttributeList() for j in range(len(deref_attribute_names)):",
"PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class DereferenceControl(LDAPControl): controlType",
"pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16'",
"str(tv[0]): [str(v) for v in tv[1]] for tv in deref_vals",
"= namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType",
"with ASN.1 declaration in I-D AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence):",
"return deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ =",
"in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]): [str(v)",
"DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def",
"tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal',",
"namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf):",
"return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {}",
"self.derefSpecs = derefSpecs or {} def _derefSpecs(self): deref_specs = DerefSpecs()",
"componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf):",
"namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList() ), )",
"deref_attributes = AttributeList() for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes)",
"- classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for project details.",
"AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet",
"componentType = PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence):",
"PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class",
"namedtype.NamedType( 'attributes', AttributeList() ), ) class DerefSpecs(univ.SequenceOf): componentType = DerefSpec()",
"__init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or {} def _derefSpecs(self): deref_specs",
"in tv[1]] for tv in deref_vals or [] } try:",
"AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly(",
"from pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID =",
"= DerefSpec() # Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType =",
"decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for deref_res in decodedValue:",
"AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr',",
"'derefAttr', AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList() ), ) class DerefSpecs(univ.SequenceOf):",
"deref_specs.setComponentByPosition(i,deref_spec) i += 1 return deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs())",
"+= 1 return deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue):",
"= decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for deref_res in decodedValue: deref_attr,deref_val,deref_vals",
"encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes =",
"class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet = univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) )",
"DerefSpecs(univ.SequenceOf): componentType = DerefSpec() # Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf):",
"PartialAttribute(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class",
"ASN.1 declaration in I-D AttributeList = AttributeDescriptionList class DerefSpec(univ.Sequence): componentType",
"namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class DerefResultControlValue(univ.SequenceOf): componentType",
"deref_specs = DerefSpecs() i = 0 for deref_attr,deref_attribute_names in self.derefSpecs.items():",
"= {} for deref_res in decodedValue: deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict",
"1 return deref_specs def encodeControlValue(self): return encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_",
"pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request types #---------------------------------------------------------------------------",
"= DerefRes() class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality)",
"AttributeDescription() ), namedtype.NamedType( 'attributes', AttributeList() ), ) class DerefSpecs(univ.SequenceOf): componentType",
"For compatibility with ASN.1 declaration in I-D AttributeList = AttributeDescriptionList",
"derefSpecs or {} def _derefSpecs(self): deref_specs = DerefSpecs() i =",
"componentType = DerefSpec() # Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType",
"https://www.python-ldap.org/ for project details. \"\"\" __all__ = [ 'DEREF_CONTROL_OID', 'DereferenceControl',",
"= AttributeList() for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec)",
"from pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber import encoder,decoder from pyasn1_modules.rfc2251",
"def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for deref_res",
"# Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType = AttributeValue() class",
"encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request",
"= namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals', PartialAttributeList()), ) class",
"Request types #--------------------------------------------------------------------------- # For compatibility with ASN.1 declaration in",
"{} def _derefSpecs(self): deref_specs = DerefSpecs() i = 0 for",
"= [ 'DEREF_CONTROL_OID', 'DereferenceControl', ] import ldap.controls from ldap.controls import",
"namedtype.NamedTypes( namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType =",
"DerefSpec() # Response types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType = AttributeValue()",
"namedtype.NamedType('type', AttributeDescription()), namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute()",
"= derefSpecs or {} def _derefSpecs(self): deref_specs = DerefSpecs() i",
"import encoder,decoder from pyasn1_modules.rfc2251 import LDAPDN,AttributeDescription,AttributeDescriptionList,AttributeValue DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' #",
"# For compatibility with ASN.1 declaration in I-D AttributeList =",
"LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs = derefSpecs or {} def _derefSpecs(self): deref_specs =",
"deref_attr,deref_val,deref_vals = deref_res[0],deref_res[1],deref_res[2] partial_attrs_dict = { str(tv[0]): [str(v) for v",
"= DerefSpecs() i = 0 for deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec",
"class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr', AttributeDescription()), namedtype.NamedType('derefVal', LDAPDN()), namedtype.OptionalNamedType('attrVals',",
"= univ.Sequence.tagSet.tagImplicitly( tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,0) ) class DerefRes(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType('derefAttr',",
"in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i += 1 return",
"AttributeList() ), ) class DerefSpecs(univ.SequenceOf): componentType = DerefSpec() # Response",
"or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)]",
"DEREF_CONTROL_OID = '1.3.6.1.4.1.4203.666.5.16' # Request types #--------------------------------------------------------------------------- # For compatibility",
"AttributeList() for j in range(len(deref_attribute_names)): deref_attributes.setComponentByPosition(j,deref_attribute_names[j]) deref_spec.setComponentByName('derefAttr',AttributeDescription(deref_attr)) deref_spec.setComponentByName('attributes',deref_attributes) deref_specs.setComponentByPosition(i,deref_spec) i",
"import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type import namedtype,univ,tag from pyasn1.codec.ber",
"decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for deref_res in decodedValue: deref_attr,deref_val,deref_vals =",
"= AttributeDescriptionList class DerefSpec(univ.Sequence): componentType = namedtype.NamedTypes( namedtype.NamedType( 'derefAttr', AttributeDescription()",
"class DerefSpecs(univ.SequenceOf): componentType = DerefSpec() # Response types #--------------------------------------------------------------------------- class",
"namedtype.NamedType('vals', AttributeValues()), ) class PartialAttributeList(univ.SequenceOf): componentType = PartialAttribute() tagSet =",
"partial_attrs_dict = { str(tv[0]): [str(v) for v in tv[1]] for",
"deref_attr,deref_attribute_names in self.derefSpecs.items(): deref_spec = DerefSpec() deref_attributes = AttributeList() for",
"coding: utf-8 -*- \"\"\" ldap.controls.deref - classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref)",
"in deref_vals or [] } try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)]",
"class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID",
"\"\"\" ldap.controls.deref - classes for (see https://tools.ietf.org/html/draft-masarati-ldap-deref) See https://www.python-ldap.org/ for",
"import ldap.controls from ldap.controls import LDAPControl,KNOWN_RESPONSE_CONTROLS import pyasn1_modules.rfc2251 from pyasn1.type",
"encoder.encode(self._derefSpecs()) def decodeControlValue(self,encodedControlValue): decodedValue,_ = decoder.decode(encodedControlValue,asn1Spec=DerefResultControlValue()) self.derefRes = {} for",
"# -*- coding: utf-8 -*- \"\"\" ldap.controls.deref - classes for",
"DerefRes() class DereferenceControl(LDAPControl): controlType = DEREF_CONTROL_OID def __init__(self,criticality=False,derefSpecs=None): LDAPControl.__init__(self,self.controlType,criticality) self.derefSpecs",
"} try: self.derefRes[str(deref_attr)].append((str(deref_val),partial_attrs_dict)) except KeyError: self.derefRes[str(deref_attr)] = [(str(deref_val),partial_attrs_dict)] KNOWN_RESPONSE_CONTROLS[DereferenceControl.controlType] =",
") class DerefResultControlValue(univ.SequenceOf): componentType = DerefRes() class DereferenceControl(LDAPControl): controlType =",
"types #--------------------------------------------------------------------------- class AttributeValues(univ.SetOf): componentType = AttributeValue() class PartialAttribute(univ.Sequence): componentType",
"'attributes', AttributeList() ), ) class DerefSpecs(univ.SequenceOf): componentType = DerefSpec() #"
] |
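The fragments in the array above are overlapping windows over python-ldap's `ldap.controls.deref` module, which builds the Dereference request control value out of pyasn1 types and BER-encodes it. As a reference point, here is a minimal, self-contained sketch of that request-side encoding step. It is not the python-ldap implementation itself: the class names mirror the fragments, `AttributeDescriptionList` is used directly where the original aliases it as `AttributeList`, and the sample attribute names (`member`, `cn`, `uid`) are made up for illustration.

```python
# Sketch: build and BER-encode a Dereference control value with pyasn1.
from pyasn1.type import namedtype, univ
from pyasn1.codec.ber import encoder
from pyasn1_modules.rfc2251 import AttributeDescription, AttributeDescriptionList


class DerefSpec(univ.Sequence):
    # SEQUENCE { derefAttr AttributeDescription, attributes AttributeDescriptionList }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('derefAttr', AttributeDescription()),
        namedtype.NamedType('attributes', AttributeDescriptionList()),
    )


class DerefSpecs(univ.SequenceOf):
    componentType = DerefSpec()


def build_control_value(deref_specs):
    """deref_specs: mapping of dereference attribute -> list of attribute names."""
    specs = DerefSpecs()
    for i, (deref_attr, attr_names) in enumerate(deref_specs.items()):
        attrs = AttributeDescriptionList()
        for j, name in enumerate(attr_names):
            attrs.setComponentByPosition(j, name)
        spec = DerefSpec()
        spec.setComponentByName('derefAttr', AttributeDescription(deref_attr))
        spec.setComponentByName('attributes', attrs)
        specs.setComponentByPosition(i, spec)
    return encoder.encode(specs)


if __name__ == '__main__':
    # hypothetical attributes, purely for illustration
    print(build_control_value({'member': ['cn', 'uid']}).hex())
```

The response side in the fragments does the inverse: `decoder.decode(..., asn1Spec=DerefResultControlValue())` and a dict comprehension that turns each `PartialAttributeList` into a plain `str -> [str]` mapping.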
[
"(Or at least most of them). #Mutant standard is huge",
"instances] #Strip any weird issues from writing instances.sort() #Sort them",
"see if it exists. pass else: if \"ms_\" not in",
"them into one big folder for ease of access def",
"out Mutant Standard Emojis (Or at least most of them).",
"error then keep going. print(e) pass except Exception as e:",
"in r.json(): #Emoji = the json code from the request",
"in instances] #Strip any weird issues from writing instances.sort() #Sort",
"else: os.mkdir(\"emoji/\") #make it if it doesnt for name in",
"% name files = os.listdir(path) for name in files: #This",
"pass clone(instances) #Clone all of them into one big folder",
"r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into",
"Exception as e: print(\"Instance Error\") print(e) pass clone(instances) #Clone all",
"#If somethings fucky throw a nice error then keep going.",
"%s\"% name) path = \"emoji/%s/\" % name files = os.listdir(path)",
"% name #Because of the clone function we know all",
"json code from the request try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to",
"if it exists. pass else: if \"ms_\" not in emoji['shortcode']:",
"try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if it exists. pass",
"image from the json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it",
"name in files: #This gets alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then",
"1 print(\"-----!\"+name+\"!-----\") print(str(i) +\" of \" + str(count) + \"",
"all folder except Exception as e: print(e) pass if __name__",
"def main(): with open(\"data.csv\") as i: #Open the data.csv file",
"Exception as e: print(e) pass if __name__ == '__main__': main()",
"into memory instances = [x.strip() for x in instances] #Strip",
"fucky throw a nice error then keep going. print(e) pass",
"instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass else:",
"most of them). #Mutant standard is huge and common #print(emoji['shortcode']",
"#Then copies them into the all folder except Exception as",
"try: for emoji in r.json(): #Emoji = the json code",
"Error\") print(e) pass clone(instances) #Clone all of them into one",
"the standard url for fetching data path = \"emoji/%s/\" %",
"json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as an image",
"somethings fucky throw a nice error then keep going. print(e)",
"urls, etc pass #Don't stop the beat except Exception as",
"print(e) pass except Exception as e: print(e) def setup(instances): if",
"print(e) #Print the error. We catch errors here for pleroma",
"into the standard url for fetching data path = \"emoji/%s/\"",
"path = \"emoji/%s/\" % name #Because of the clone function",
"then keep going. print(e) pass except Exception as e: print(e)",
"for: %s\"% name) path = \"emoji/%s/\" % name files =",
"for ease of access def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name,",
"from writing instances.sort() #Sort them alphabetically setup(instances) #Run setup to",
"pleroma instances, weirdly encoded urls, etc pass #Don't stop the",
"from the json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as",
"the fetching code except Exception as e: print(e) #Print the",
"name files = os.listdir(path) for name in files: #This gets",
"= the json code from the request try: if os.path.isfile(path+emoji['shortcode']+\".png\"):",
"instances, weirdly encoded urls, etc pass #Don't stop the beat",
"of these folders will exist try: for emoji in r.json():",
"Emojis (Or at least most of them). #Mutant standard is",
"#This gets alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into",
"print(e) pass clone(instances) #Clone all of them into one big",
"= \"emoji/%s/\" % name #Because of the clone function we",
"= requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json open(path +",
"standard is huge and common #print(emoji['shortcode'] + \" found!\") emojiimage",
"print(e) def setup(instances): if (os.path.isdir(\"emoji/\")): #Check to see if emoji/",
"files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into the all folder",
"= [x.strip() for x in instances] #Strip any weird issues",
"for emoji in r.json(): #Emoji = the json code from",
"print(\"-----!\"+name+\"!-----\") print(str(i) +\" of \" + str(count) + \" remaining!\")",
"#Check to see if emoji/ exists pass else: os.mkdir(\"emoji/\") #make",
"\" remaining!\") fetch(name) #Run the fetching code except Exception as",
"instances.sort() #Sort them alphabetically setup(instances) #Run setup to create all",
"requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard",
"Exception as e: print(e) #Print the error. We catch errors",
"weird issues from writing instances.sort() #Sort them alphabetically setup(instances) #Run",
"except Exception as e: print(e) def setup(instances): if (os.path.isdir(\"emoji/\")): #Check",
"Mutant Standard Emojis (Or at least most of them). #Mutant",
"requests import urllib.request import os.path import shutil import csv def",
"fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name",
"try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into the all folder except",
"if (os.path.isdir(\"emoji/\")): #Check to see if emoji/ exists pass else:",
"instance name into the standard url for fetching data path",
"0 try: for name in instances: try: i += 1",
"the json code from the request try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check",
"\" + emoji['url']) #If somethings fucky throw a nice error",
"instances: print(\"Copying emoji for: %s\"% name) path = \"emoji/%s/\" %",
"emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as an image in the filesystem",
"(os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\") def clone(instances): for name in instances:",
"#Sort them alphabetically setup(instances) #Run setup to create all the",
"allow_redirects=True) #Throw the instance name into the standard url for",
"in instances: print(\"Copying emoji for: %s\"% name) path = \"emoji/%s/\"",
"to create all the necessary files and subfolders count =",
"import os.path import shutil import csv def main(): with open(\"data.csv\")",
"nice error then keep going. print(e) pass except Exception as",
"clone(instances): for name in instances: print(\"Copying emoji for: %s\"% name)",
"image in the filesystem except Exception as e: print(\"Did not",
"str(count) + \" remaining!\") fetch(name) #Run the fetching code except",
"Exception as e: print(\"Did not get: \" + emoji['url']) #If",
"+ emoji['url']) #If somethings fucky throw a nice error then",
"#Now save it as an image in the filesystem except",
"requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content)",
"all of them into one big folder for ease of",
"#Throw the instance name into the standard url for fetching",
"access def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the",
"urllib.request import os.path import shutil import csv def main(): with",
"import csv def main(): with open(\"data.csv\") as i: #Open the",
"errors here for pleroma instances, weirdly encoded urls, etc pass",
"(os.path.isdir(\"emoji/\")): #Check to see if emoji/ exists pass else: os.mkdir(\"emoji/\")",
"the request try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if it",
"count just for fun i = 0 try: for name",
"subfolders count = len(instances) #Get the count just for fun",
"print(str(i) +\" of \" + str(count) + \" remaining!\") fetch(name)",
"into one big folder for ease of access def fetch(name):",
"just for fun i = 0 try: for name in",
"the json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as an",
"i += 1 print(\"-----!\"+name+\"!-----\") print(str(i) +\" of \" + str(count)",
"+= 1 print(\"-----!\"+name+\"!-----\") print(str(i) +\" of \" + str(count) +",
"clone(instances) #Clone all of them into one big folder for",
"alphabetically setup(instances) #Run setup to create all the necessary files",
"\" found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the",
"for name in instances: print(\"Copying emoji for: %s\"% name) path",
"files: #This gets alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them",
"except Exception as e: print(\"Instance Error\") print(e) pass clone(instances) #Clone",
"in instances: try: i += 1 print(\"-----!\"+name+\"!-----\") print(str(i) +\" of",
"copies them into the all folder except Exception as e:",
"for pleroma instances, weirdly encoded urls, etc pass #Don't stop",
"and subfolders count = len(instances) #Get the count just for",
"alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into the all",
"+ \" remaining!\") fetch(name) #Run the fetching code except Exception",
"= os.listdir(path) for name in files: #This gets alll files",
"len(instances) #Get the count just for fun i = 0",
"#Open the data.csv file instances = i.readlines() #Write them into",
"e: print(e) def setup(instances): if (os.path.isdir(\"emoji/\")): #Check to see if",
"one big folder for ease of access def fetch(name): r",
"= requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the",
"the clone function we know all of these folders will",
"data.csv file instances = i.readlines() #Write them into memory instances",
"as e: print(e) def setup(instances): if (os.path.isdir(\"emoji/\")): #Check to see",
"see if emoji/ exists pass else: os.mkdir(\"emoji/\") #make it if",
"count = len(instances) #Get the count just for fun i",
"shutil import csv def main(): with open(\"data.csv\") as i: #Open",
"import shutil import csv def main(): with open(\"data.csv\") as i:",
"keep going. print(e) pass except Exception as e: print(e) def",
"os.mkdir(\"emoji/all\") def clone(instances): for name in instances: print(\"Copying emoji for:",
"#make it if it doesnt for name in instances: if",
"os.listdir(path) for name in files: #This gets alll files try:",
"the error. We catch errors here for pleroma instances, weirdly",
"e: print(\"Instance Error\") print(e) pass clone(instances) #Clone all of them",
"x in instances] #Strip any weird issues from writing instances.sort()",
"instances = [x.strip() for x in instances] #Strip any weird",
"(os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\") def",
"for name in instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name) if",
"the image from the json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save",
"as an image in the filesystem except Exception as e:",
"if \"ms_\" not in emoji['shortcode']: #Cut out Mutant Standard Emojis",
"name in instances: try: i += 1 print(\"-----!\"+name+\"!-----\") print(str(i) +\"",
"it if it doesnt for name in instances: if (os.path.isdir(\"emoji/%s/\"%name)):",
"e: print(\"Did not get: \" + emoji['url']) #If somethings fucky",
"#Check to see if it exists. pass else: if \"ms_\"",
"emoji/ exists pass else: os.mkdir(\"emoji/\") #make it if it doesnt",
"from the request try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if",
"\" + str(count) + \" remaining!\") fetch(name) #Run the fetching",
"etc pass #Don't stop the beat except Exception as e:",
"#Write them into memory instances = [x.strip() for x in",
"else: if \"ms_\" not in emoji['shortcode']: #Cut out Mutant Standard",
"data path = \"emoji/%s/\" % name #Because of the clone",
"#Don't stop the beat except Exception as e: print(\"Instance Error\")",
"def clone(instances): for name in instances: print(\"Copying emoji for: %s\"%",
"pass #Don't stop the beat except Exception as e: print(\"Instance",
"try: for name in instances: try: i += 1 print(\"-----!\"+name+\"!-----\")",
"big folder for ease of access def fetch(name): r =",
"as i: #Open the data.csv file instances = i.readlines() #Write",
"in instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass",
"as e: print(\"Did not get: \" + emoji['url']) #If somethings",
"to see if emoji/ exists pass else: os.mkdir(\"emoji/\") #make it",
"of them into one big folder for ease of access",
"else: os.mkdir(\"emoji/all\") def clone(instances): for name in instances: print(\"Copying emoji",
"save it as an image in the filesystem except Exception",
"found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json",
"if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if it exists. pass else:",
"issues from writing instances.sort() #Sort them alphabetically setup(instances) #Run setup",
"remaining!\") fetch(name) #Run the fetching code except Exception as e:",
"Exception as e: print(e) def setup(instances): if (os.path.isdir(\"emoji/\")): #Check to",
"catch errors here for pleroma instances, weirdly encoded urls, etc",
"in the filesystem except Exception as e: print(\"Did not get:",
"any weird issues from writing instances.sort() #Sort them alphabetically setup(instances)",
"[x.strip() for x in instances] #Strip any weird issues from",
"if emoji/ exists pass else: os.mkdir(\"emoji/\") #make it if it",
"#Strip any weird issues from writing instances.sort() #Sort them alphabetically",
"fetching code except Exception as e: print(e) #Print the error.",
"a nice error then keep going. print(e) pass except Exception",
"for name in instances: try: i += 1 print(\"-----!\"+name+\"!-----\") print(str(i)",
"#Cut out Mutant Standard Emojis (Or at least most of",
"these folders will exist try: for emoji in r.json(): #Emoji",
"writing instances.sort() #Sort them alphabetically setup(instances) #Run setup to create",
"pass else: os.mkdir(\"emoji/all\") def clone(instances): for name in instances: print(\"Copying",
"i.readlines() #Write them into memory instances = [x.strip() for x",
"r.json(): #Emoji = the json code from the request try:",
"the data.csv file instances = i.readlines() #Write them into memory",
"try: i += 1 print(\"-----!\"+name+\"!-----\") print(str(i) +\" of \" +",
"= len(instances) #Get the count just for fun i =",
"shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into the all folder except Exception",
"#Print the error. We catch errors here for pleroma instances,",
"error. We catch errors here for pleroma instances, weirdly encoded",
"else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\") def clone(instances): for",
"least most of them). #Mutant standard is huge and common",
"pass else: os.mkdir(\"emoji/\") #make it if it doesnt for name",
"main(): with open(\"data.csv\") as i: #Open the data.csv file instances",
"for x in instances] #Strip any weird issues from writing",
"fetching data path = \"emoji/%s/\" % name #Because of the",
"it as an image in the filesystem except Exception as",
"the beat except Exception as e: print(\"Instance Error\") print(e) pass",
"them). #Mutant standard is huge and common #print(emoji['shortcode'] + \"",
"fetch(name) #Run the fetching code except Exception as e: print(e)",
"instances: try: i += 1 print(\"-----!\"+name+\"!-----\") print(str(i) +\" of \"",
"ease of access def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True)",
"import urllib.request import os.path import shutil import csv def main():",
"of the clone function we know all of these folders",
"common #print(emoji['shortcode'] + \" found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the",
"\"emoji/%s/\" % name files = os.listdir(path) for name in files:",
"folder except Exception as e: print(e) pass if __name__ ==",
"name) path = \"emoji/%s/\" % name files = os.listdir(path) for",
"clone function we know all of these folders will exist",
"= i.readlines() #Write them into memory instances = [x.strip() for",
"gets alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies them into the",
"url for fetching data path = \"emoji/%s/\" % name #Because",
"open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as an image in",
"name into the standard url for fetching data path =",
"them into memory instances = [x.strip() for x in instances]",
"for fetching data path = \"emoji/%s/\" % name #Because of",
"know all of these folders will exist try: for emoji",
"the necessary files and subfolders count = len(instances) #Get the",
"path = \"emoji/%s/\" % name files = os.listdir(path) for name",
"if (os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\") def clone(instances): for name in",
"print(\"Did not get: \" + emoji['url']) #If somethings fucky throw",
"+ str(count) + \" remaining!\") fetch(name) #Run the fetching code",
"exists. pass else: if \"ms_\" not in emoji['shortcode']: #Cut out",
"files and subfolders count = len(instances) #Get the count just",
"will exist try: for emoji in r.json(): #Emoji = the",
"= \"emoji/%s/\" % name files = os.listdir(path) for name in",
"file instances = i.readlines() #Write them into memory instances =",
"for fun i = 0 try: for name in instances:",
"\"ms_\" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or",
"emoji['url']) #If somethings fucky throw a nice error then keep",
"it exists. pass else: if \"ms_\" not in emoji['shortcode']: #Cut",
"not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at",
"exists pass else: os.mkdir(\"emoji/\") #make it if it doesnt for",
"an image in the filesystem except Exception as e: print(\"Did",
"name #Because of the clone function we know all of",
"+\" of \" + str(count) + \" remaining!\") fetch(name) #Run",
"as e: print(\"Instance Error\") print(e) pass clone(instances) #Clone all of",
"os.path import shutil import csv def main(): with open(\"data.csv\") as",
"+ \" found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from",
"setup(instances) #Run setup to create all the necessary files and",
"the count just for fun i = 0 try: for",
"memory instances = [x.strip() for x in instances] #Strip any",
"#Mutant standard is huge and common #print(emoji['shortcode'] + \" found!\")",
"in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least",
"print(\"Copying emoji for: %s\"% name) path = \"emoji/%s/\" % name",
"them into the all folder except Exception as e: print(e)",
"we know all of these folders will exist try: for",
"if (os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\")",
"pass else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\") def clone(instances):",
"throw a nice error then keep going. print(e) pass except",
"#print(emoji['shortcode'] + \" found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image",
"os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")): pass else: os.mkdir(\"emoji/all\") def clone(instances): for name",
"standard url for fetching data path = \"emoji/%s/\" % name",
"i: #Open the data.csv file instances = i.readlines() #Write them",
"instances = i.readlines() #Write them into memory instances = [x.strip()",
"all of these folders will exist try: for emoji in",
"emoji in r.json(): #Emoji = the json code from the",
"it doesnt for name in instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass else:",
"beat except Exception as e: print(\"Instance Error\") print(e) pass clone(instances)",
"huge and common #print(emoji['shortcode'] + \" found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True)",
"We catch errors here for pleroma instances, weirdly encoded urls,",
"request try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if it exists.",
"#Run the fetching code except Exception as e: print(e) #Print",
"as e: print(e) #Print the error. We catch errors here",
"print(\"Instance Error\") print(e) pass clone(instances) #Clone all of them into",
"necessary files and subfolders count = len(instances) #Get the count",
"#Because of the clone function we know all of these",
"open(\"data.csv\") as i: #Open the data.csv file instances = i.readlines()",
"the filesystem except Exception as e: print(\"Did not get: \"",
"Standard Emojis (Or at least most of them). #Mutant standard",
"of them). #Mutant standard is huge and common #print(emoji['shortcode'] +",
"encoded urls, etc pass #Don't stop the beat except Exception",
"except Exception as e: print(e) #Print the error. We catch",
"def setup(instances): if (os.path.isdir(\"emoji/\")): #Check to see if emoji/ exists",
"pass except Exception as e: print(e) def setup(instances): if (os.path.isdir(\"emoji/\")):",
"except Exception as e: print(\"Did not get: \" + emoji['url'])",
"to see if it exists. pass else: if \"ms_\" not",
"#Run setup to create all the necessary files and subfolders",
"the all folder except Exception as e: print(e) pass if",
"except Exception as e: print(e) pass if __name__ == '__main__':",
"name in instances: print(\"Copying emoji for: %s\"% name) path =",
"at least most of them). #Mutant standard is huge and",
"going. print(e) pass except Exception as e: print(e) def setup(instances):",
"#Get the image from the json open(path + emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now",
"not get: \" + emoji['url']) #If somethings fucky throw a",
"os.mkdir(\"emoji/\") #make it if it doesnt for name in instances:",
"folder for ease of access def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'%",
"them alphabetically setup(instances) #Run setup to create all the necessary",
"csv def main(): with open(\"data.csv\") as i: #Open the data.csv",
"is huge and common #print(emoji['shortcode'] + \" found!\") emojiimage =",
"filesystem except Exception as e: print(\"Did not get: \" +",
"fun i = 0 try: for name in instances: try:",
"all the necessary files and subfolders count = len(instances) #Get",
"import requests import urllib.request import os.path import shutil import csv",
"function we know all of these folders will exist try:",
"setup to create all the necessary files and subfolders count",
"for name in files: #This gets alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name)",
"with open(\"data.csv\") as i: #Open the data.csv file instances =",
"emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json open(path",
"code except Exception as e: print(e) #Print the error. We",
"+ emoji['shortcode']+\".png\",'wb').write(emojiimage.content) #Now save it as an image in the",
"get: \" + emoji['url']) #If somethings fucky throw a nice",
"emoji for: %s\"% name) path = \"emoji/%s/\" % name files",
"of \" + str(count) + \" remaining!\") fetch(name) #Run the",
"stop the beat except Exception as e: print(\"Instance Error\") print(e)",
"pass else: if \"ms_\" not in emoji['shortcode']: #Cut out Mutant",
"weirdly encoded urls, etc pass #Don't stop the beat except",
"i = 0 try: for name in instances: try: i",
"doesnt for name in instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name)",
"code from the request try: if os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see",
"folders will exist try: for emoji in r.json(): #Emoji =",
"and common #print(emoji['shortcode'] + \" found!\") emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get",
"#Clone all of them into one big folder for ease",
"if it doesnt for name in instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass",
"here for pleroma instances, weirdly encoded urls, etc pass #Don't",
"create all the necessary files and subfolders count = len(instances)",
"into the all folder except Exception as e: print(e) pass",
"name in instances: if (os.path.isdir(\"emoji/%s/\"%name)): pass else: os.mkdir(\"emoji/%s/\"%name) if (os.path.isdir(\"emoji/all\")):",
"= 0 try: for name in instances: try: i +=",
"\"emoji/%s/\" % name #Because of the clone function we know",
"os.path.isfile(path+emoji['shortcode']+\".png\"): #Check to see if it exists. pass else: if",
"in files: #This gets alll files try: shutil.copyfile(path+name,\"emoji/all/\"+name) #Then copies",
"emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most",
"exist try: for emoji in r.json(): #Emoji = the json",
"name, allow_redirects=True) #Throw the instance name into the standard url",
"def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance",
"#Get the count just for fun i = 0 try:",
"of access def fetch(name): r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw",
"#Emoji = the json code from the request try: if",
"the instance name into the standard url for fetching data",
"setup(instances): if (os.path.isdir(\"emoji/\")): #Check to see if emoji/ exists pass",
"files = os.listdir(path) for name in files: #This gets alll",
"e: print(e) #Print the error. We catch errors here for"
] |
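The fragments in the array above come from a small emoji-mirroring script: it reads instance names from `data.csv`, queries each instance's Mastodon `/api/v1/custom_emojis` endpoint, and writes every `static_url` image to `emoji/<instance>/<shortcode>.png`, skipping Mutant Standard (`ms_`) shortcodes. Below is a condensed sketch of just the fetch step. It follows the flow visible in the fragments but is not the original script verbatim; the coarse `except Exception` handling mirrors the original's intent, and the instance name at the bottom is a placeholder.

```python
# Sketch: download one instance's custom emoji, skipping ones already on disk.
import os

import requests


def fetch(name):
    # Standard Mastodon endpoint used by the script for the emoji listing.
    r = requests.get('https://%s/api/v1/custom_emojis' % name, allow_redirects=True)
    path = 'emoji/%s/' % name
    os.makedirs(path, exist_ok=True)
    for emoji in r.json():
        target = path + emoji['shortcode'] + '.png'
        if os.path.isfile(target):
            continue  # already downloaded on an earlier run
        if 'ms_' in emoji['shortcode']:
            continue  # skip Mutant Standard emoji, as the original does
        try:
            image = requests.get(emoji['static_url'], allow_redirects=True)
            with open(target, 'wb') as f:
                f.write(image.content)
        except Exception as exc:
            # Keep going on weird URLs / flaky instances, like the original.
            print('Did not get:', emoji.get('url'), exc)


if __name__ == '__main__':
    fetch('example.social')  # hypothetical instance name
```

The original additionally copies every per-instance folder into a shared `emoji/all/` directory and prints a running count of remaining instances.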
[
"- 1] + 1) else: f[i % 2][j] = min(f[(i",
"j in range(m + 1): f[0][j] = j for i",
"write your code here if s == t: return False",
"+ 1, f[i % 2][j - 1] + 1) else:",
"m = len(s), len(t) f = [[0] * (m +",
"= [[0] * (m + 1) for _ in range(2)]",
"both one edit distance apart or false \"\"\" def isOneEditDistance(self,",
"1, f[i % 2][j - 1] + 1) return f[n",
"for _ in range(2)] for j in range(m + 1):",
"they are both one edit distance apart or false \"\"\"",
"range(1, m + 1): if s[i - 1] == t[j",
"isOneEditDistance(self, s, t): # write your code here if s",
"distance apart or false \"\"\" def isOneEditDistance(self, s, t): #",
"i in range(1, n + 1): f[i % 2][0] =",
"string @return: true if they are both one edit distance",
"else: f[i % 2][j] = min(f[(i - 1) % 2][j",
"def isOneEditDistance(self, s, t): # write your code here if",
"% 2][j - 1] + 1) else: f[i % 2][j]",
"s == t: return False if abs(len(s) - len(t)) >",
"[[0] * (m + 1) for _ in range(2)] for",
"j for i in range(1, n + 1): f[i %",
"s: a string @param t: a string @return: true if",
"> 1: return False n, m = len(s), len(t) f",
"% 2][j - 1] + 1, f[(i - 1) %",
"% 2][j - 1], f[(i - 1) % 2][j] +",
"in range(m + 1): f[0][j] = j for i in",
"+ 1) for _ in range(2)] for j in range(m",
"abs(len(s) - len(t)) > 1: return False n, m =",
"class Solution: \"\"\" @param s: a string @param t: a",
"= min(f[(i - 1) % 2][j - 1], f[(i -",
"n, m = len(s), len(t) f = [[0] * (m",
"\"\"\" def isOneEditDistance(self, s, t): # write your code here",
"= min(f[(i - 1) % 2][j - 1] + 1,",
"range(2)] for j in range(m + 1): f[0][j] = j",
"1): f[i % 2][0] = i for j in range(1,",
"len(t) f = [[0] * (m + 1) for _",
"+ 1, f[i % 2][j - 1] + 1) return",
"1) for _ in range(2)] for j in range(m +",
"% 2][j] = min(f[(i - 1) % 2][j - 1],",
"m + 1): if s[i - 1] == t[j -",
"or false \"\"\" def isOneEditDistance(self, s, t): # write your",
"+ 1): f[i % 2][0] = i for j in",
"code here if s == t: return False if abs(len(s)",
"- 1] + 1, f[(i - 1) % 2][j] +",
"= i for j in range(1, m + 1): if",
"n + 1): f[i % 2][0] = i for j",
"1, f[i % 2][j - 1] + 1) else: f[i",
"a string @param t: a string @return: true if they",
"% 2][j - 1] + 1) return f[n % 2][m]",
"== t[j - 1]: f[i % 2][j] = min(f[(i -",
"f[(i - 1) % 2][j] + 1, f[i % 2][j",
"t): # write your code here if s == t:",
"+ 1): if s[i - 1] == t[j - 1]:",
"return False n, m = len(s), len(t) f = [[0]",
"- 1) % 2][j] + 1, f[i % 2][j -",
"for i in range(1, n + 1): f[i % 2][0]",
"2][j - 1] + 1, f[(i - 1) % 2][j]",
"2][j] = min(f[(i - 1) % 2][j - 1], f[(i",
"f[0][j] = j for i in range(1, n + 1):",
"range(1, n + 1): f[i % 2][0] = i for",
"i for j in range(1, m + 1): if s[i",
"- 1] == t[j - 1]: f[i % 2][j] =",
"- 1) % 2][j - 1] + 1, f[(i -",
"false \"\"\" def isOneEditDistance(self, s, t): # write your code",
"in range(1, n + 1): f[i % 2][0] = i",
"1) % 2][j] + 1, f[i % 2][j - 1]",
"@return: true if they are both one edit distance apart",
"% 2][0] = i for j in range(1, m +",
"2][j] + 1, f[i % 2][j - 1] + 1)",
"if abs(len(s) - len(t)) > 1: return False n, m",
"f[i % 2][j] = min(f[(i - 1) % 2][j -",
"(m + 1) for _ in range(2)] for j in",
"1] + 1) else: f[i % 2][j] = min(f[(i -",
"+ 1): f[0][j] = j for i in range(1, n",
"Solution: \"\"\" @param s: a string @param t: a string",
"f[i % 2][0] = i for j in range(1, m",
"2][j] = min(f[(i - 1) % 2][j - 1] +",
"are both one edit distance apart or false \"\"\" def",
"s[i - 1] == t[j - 1]: f[i % 2][j]",
"apart or false \"\"\" def isOneEditDistance(self, s, t): # write",
"1] == t[j - 1]: f[i % 2][j] = min(f[(i",
"+ 1) else: f[i % 2][j] = min(f[(i - 1)",
"true if they are both one edit distance apart or",
"j in range(1, m + 1): if s[i - 1]",
"- len(t)) > 1: return False n, m = len(s),",
"2][j - 1], f[(i - 1) % 2][j] + 1,",
"= j for i in range(1, n + 1): f[i",
"1) % 2][j - 1], f[(i - 1) % 2][j]",
"for j in range(m + 1): f[0][j] = j for",
"2][j - 1] + 1) else: f[i % 2][j] =",
"your code here if s == t: return False if",
"t[j - 1]: f[i % 2][j] = min(f[(i - 1)",
"1]: f[i % 2][j] = min(f[(i - 1) % 2][j",
"- 1], f[(i - 1) % 2][j] + 1, f[i",
"% 2][j] + 1, f[i % 2][j - 1] +",
"min(f[(i - 1) % 2][j - 1] + 1, f[(i",
"return False if abs(len(s) - len(t)) > 1: return False",
"@param s: a string @param t: a string @return: true",
"False n, m = len(s), len(t) f = [[0] *",
"1) else: f[i % 2][j] = min(f[(i - 1) %",
"1], f[(i - 1) % 2][j] + 1, f[i %",
"1): if s[i - 1] == t[j - 1]: f[i",
"+ 1, f[(i - 1) % 2][j] + 1, f[i",
"* (m + 1) for _ in range(2)] for j",
"f = [[0] * (m + 1) for _ in",
"string @param t: a string @return: true if they are",
"edit distance apart or false \"\"\" def isOneEditDistance(self, s, t):",
"if s[i - 1] == t[j - 1]: f[i %",
"here if s == t: return False if abs(len(s) -",
"a string @return: true if they are both one edit",
"one edit distance apart or false \"\"\" def isOneEditDistance(self, s,",
"== t: return False if abs(len(s) - len(t)) > 1:",
"= len(s), len(t) f = [[0] * (m + 1)",
"if they are both one edit distance apart or false",
"1) % 2][j - 1] + 1, f[(i - 1)",
"False if abs(len(s) - len(t)) > 1: return False n,",
"2][0] = i for j in range(1, m + 1):",
"t: a string @return: true if they are both one",
"t: return False if abs(len(s) - len(t)) > 1: return",
"f[i % 2][j - 1] + 1) else: f[i %",
"@param t: a string @return: true if they are both",
"_ in range(2)] for j in range(m + 1): f[0][j]",
"1, f[(i - 1) % 2][j] + 1, f[i %",
"% 2][j] = min(f[(i - 1) % 2][j - 1]",
"in range(1, m + 1): if s[i - 1] ==",
"1] + 1, f[(i - 1) % 2][j] + 1,",
"2][j - 1] + 1) return f[n % 2][m] ==",
"- 1) % 2][j - 1], f[(i - 1) %",
"\"\"\" @param s: a string @param t: a string @return:",
"1: return False n, m = len(s), len(t) f =",
"- 1] + 1) return f[n % 2][m] == 1",
"if s == t: return False if abs(len(s) - len(t))",
"len(t)) > 1: return False n, m = len(s), len(t)",
"in range(2)] for j in range(m + 1): f[0][j] =",
"1): f[0][j] = j for i in range(1, n +",
"s, t): # write your code here if s ==",
"# write your code here if s == t: return",
"for j in range(1, m + 1): if s[i -",
"min(f[(i - 1) % 2][j - 1], f[(i - 1)",
"- 1]: f[i % 2][j] = min(f[(i - 1) %",
"f[i % 2][j - 1] + 1) return f[n %",
"len(s), len(t) f = [[0] * (m + 1) for",
"range(m + 1): f[0][j] = j for i in range(1,"
] |
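The fragments in the array above are windows over a single "One Edit Distance" solution: it short-circuits on equal strings or a length gap greater than one, then computes the Levenshtein distance with a two-row rolling table and checks that it equals exactly 1. Stitched back together from the fragments, the solution reads as follows (a reconstruction for reference, with a small usage line added at the end for illustration):

```python
class Solution:
    """
    @param s: a string
    @param t: a string
    @return: true if they are both one edit distance apart or false
    """
    def isOneEditDistance(self, s, t):
        if s == t:
            return False
        if abs(len(s) - len(t)) > 1:
            return False
        n, m = len(s), len(t)
        # Rolling two-row edit-distance table: f[i % 2][j] is the distance
        # between s[:i] and t[:j].
        f = [[0] * (m + 1) for _ in range(2)]
        for j in range(m + 1):
            f[0][j] = j
        for i in range(1, n + 1):
            f[i % 2][0] = i
            for j in range(1, m + 1):
                if s[i - 1] == t[j - 1]:
                    f[i % 2][j] = min(f[(i - 1) % 2][j - 1],
                                      f[(i - 1) % 2][j] + 1,
                                      f[i % 2][j - 1] + 1)
                else:
                    f[i % 2][j] = min(f[(i - 1) % 2][j - 1] + 1,
                                      f[(i - 1) % 2][j] + 1,
                                      f[i % 2][j - 1] + 1)
        return f[n % 2][m] == 1


if __name__ == '__main__':
    print(Solution().isOneEditDistance("abc", "abd"))  # True: one substitution
```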
[
".backoff_timer import BackoffTimer from .sync import SyncConn from .async import",
"ready, finish, touch, requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE,",
"\"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\",",
"handler called with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\"",
"Writer from .version import __version__ # NOQA def _handle_term_signal(sig_num, frame):",
"from .message import Message from .backoff_timer import BackoffTimer from .sync",
"absolute_import import signal import tornado.ioloop import logging from .protocol import",
"signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\" Starts any instantiated",
"\"\"\" Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal)",
"from __future__ import absolute_import import signal import tornado.ioloop import logging",
"\"ready\", \"finish\", \"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\", \"valid_topic_name\", \"valid_channel_name\", \"FRAME_TYPE_RESPONSE\",",
".writer import Writer from .version import __version__ # NOQA def",
"def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal handler called with signal",
"identify, subscribe, ready, finish, touch, requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE,",
"AsyncConn from .reader import Reader from .legacy_reader import LegacyReader from",
"\"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\",",
".reader import Reader from .legacy_reader import LegacyReader from .writer import",
"import __version__ # NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal",
"instantiated :class:`nsq.Reader` or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ =",
"\"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\", \"subscribe\", \"ready\", \"finish\",",
"frame): logging.getLogger(__name__).info( 'TERM Signal handler called with signal %r', sig_num)",
"logging from .protocol import ( Error, unpack_response, decode_message, valid_topic_name, valid_channel_name,",
".async import AsyncConn from .reader import Reader from .legacy_reader import",
"import ( Error, unpack_response, decode_message, valid_topic_name, valid_channel_name, identify, subscribe, ready,",
"import SyncConn from .async import AsyncConn from .reader import Reader",
"signal import tornado.ioloop import logging from .protocol import ( Error,",
"or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME> <<EMAIL>>\"",
"\"Message\", \"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\", \"subscribe\", \"ready\",",
"= \"<NAME> <<EMAIL>>\" __all__ = [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\",",
"_handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME> <<EMAIL>>\" __all__ = [\"Reader\", \"Writer\",",
"\"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\", \"valid_topic_name\", \"valid_channel_name\", \"FRAME_TYPE_RESPONSE\", \"FRAME_TYPE_ERROR\", \"FRAME_TYPE_MESSAGE\"]",
"__version__ # NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal handler",
"valid_channel_name, identify, subscribe, ready, finish, touch, requeue, nop, pub, mpub,",
"\"unpack_response\", \"decode_message\", \"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\", \"nop\", \"pub\",",
"\"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\", \"valid_topic_name\",",
"import LegacyReader from .writer import Writer from .version import __version__",
"__future__ import absolute_import import signal import tornado.ioloop import logging from",
"\"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\",",
"\"<NAME> <<EMAIL>>\" __all__ = [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\",",
"from .legacy_reader import LegacyReader from .writer import Writer from .version",
"import tornado.ioloop import logging from .protocol import ( Error, unpack_response,",
".sync import SyncConn from .async import AsyncConn from .reader import",
"Error, unpack_response, decode_message, valid_topic_name, valid_channel_name, identify, subscribe, ready, finish, touch,",
"mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message import Message from",
"NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal handler called with",
"\"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\", \"valid_topic_name\", \"valid_channel_name\",",
"import AsyncConn from .reader import Reader from .legacy_reader import LegacyReader",
"\"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\", \"nop\",",
"logging.getLogger(__name__).info( 'TERM Signal handler called with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop()",
"run(): \"\"\" Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM,",
"__author__ = \"<NAME> <<EMAIL>>\" __all__ = [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\",",
":class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME> <<EMAIL>>\" __all__",
"import BackoffTimer from .sync import SyncConn from .async import AsyncConn",
"tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\" Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer`",
"%r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\" Starts any instantiated :class:`nsq.Reader`",
"# NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal handler called",
"any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__",
"FRAME_TYPE_MESSAGE, ) from .message import Message from .backoff_timer import BackoffTimer",
"requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message",
"Message from .backoff_timer import BackoffTimer from .sync import SyncConn from",
"from .protocol import ( Error, unpack_response, decode_message, valid_topic_name, valid_channel_name, identify,",
"[\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\",",
"touch, requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from",
"tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME> <<EMAIL>>\" __all__ = [\"Reader\", \"Writer\", \"run\",",
".version import __version__ # NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM",
"from .version import __version__ # NOQA def _handle_term_signal(sig_num, frame): logging.getLogger(__name__).info(",
"unpack_response, decode_message, valid_topic_name, valid_channel_name, identify, subscribe, ready, finish, touch, requeue,",
"from .sync import SyncConn from .async import AsyncConn from .reader",
"BackoffTimer from .sync import SyncConn from .async import AsyncConn from",
"_handle_term_signal(sig_num, frame): logging.getLogger(__name__).info( 'TERM Signal handler called with signal %r',",
"'TERM Signal handler called with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def",
"Reader from .legacy_reader import LegacyReader from .writer import Writer from",
"= [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\",",
"valid_topic_name, valid_channel_name, identify, subscribe, ready, finish, touch, requeue, nop, pub,",
":class:`nsq.Reader` or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME>",
"from .writer import Writer from .version import __version__ # NOQA",
"from .reader import Reader from .legacy_reader import LegacyReader from .writer",
"finish, touch, requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, )",
"FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message import Message from .backoff_timer",
"<gh_stars>1-10 from __future__ import absolute_import import signal import tornado.ioloop import",
"import logging from .protocol import ( Error, unpack_response, decode_message, valid_topic_name,",
"\"finish\", \"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\", \"valid_topic_name\", \"valid_channel_name\", \"FRAME_TYPE_RESPONSE\", \"FRAME_TYPE_ERROR\",",
"import Reader from .legacy_reader import LegacyReader from .writer import Writer",
"import absolute_import import signal import tornado.ioloop import logging from .protocol",
".message import Message from .backoff_timer import BackoffTimer from .sync import",
"nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message import",
"FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message import Message from .backoff_timer import",
"import Writer from .version import __version__ # NOQA def _handle_term_signal(sig_num,",
"import Message from .backoff_timer import BackoffTimer from .sync import SyncConn",
".protocol import ( Error, unpack_response, decode_message, valid_topic_name, valid_channel_name, identify, subscribe,",
"subscribe, ready, finish, touch, requeue, nop, pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR,",
"import signal import tornado.ioloop import logging from .protocol import (",
"SyncConn from .async import AsyncConn from .reader import Reader from",
"signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME> <<EMAIL>>\" __all__ = [\"Reader\",",
"decode_message, valid_topic_name, valid_channel_name, identify, subscribe, ready, finish, touch, requeue, nop,",
"( Error, unpack_response, decode_message, valid_topic_name, valid_channel_name, identify, subscribe, ready, finish,",
"LegacyReader from .writer import Writer from .version import __version__ #",
"from .async import AsyncConn from .reader import Reader from .legacy_reader",
"sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\" Starts any instantiated :class:`nsq.Reader` or",
"__all__ = [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\", \"SyncConn\",",
"from .backoff_timer import BackoffTimer from .sync import SyncConn from .async",
"<<EMAIL>>\" __all__ = [\"Reader\", \"Writer\", \"run\", \"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\",",
"\"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\",",
".legacy_reader import LegacyReader from .writer import Writer from .version import",
"tornado.ioloop import logging from .protocol import ( Error, unpack_response, decode_message,",
"pub, mpub, FRAME_TYPE_RESPONSE, FRAME_TYPE_ERROR, FRAME_TYPE_MESSAGE, ) from .message import Message",
") from .message import Message from .backoff_timer import BackoffTimer from",
"\"BackoffTimer\", \"Message\", \"Error\", \"LegacyReader\", \"SyncConn\", \"AsyncConn\", \"unpack_response\", \"decode_message\", \"identify\", \"subscribe\",",
"\"decode_message\", \"identify\", \"subscribe\", \"ready\", \"finish\", \"touch\", \"requeue\", \"nop\", \"pub\", \"mpub\",",
"Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` \"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start()",
"Signal handler called with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run():",
"\"\"\" signal.signal(signal.SIGTERM, _handle_term_signal) tornado.ioloop.IOLoop.instance().start() __author__ = \"<NAME> <<EMAIL>>\" __all__ =",
"def run(): \"\"\" Starts any instantiated :class:`nsq.Reader` or :class:`nsq.Writer` \"\"\"",
"with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\" Starts any",
"called with signal %r', sig_num) tornado.ioloop.IOLoop.instance().stop() def run(): \"\"\" Starts"
] |
[
"ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\" def",
"ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time / VCSolver time\")",
"+ \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime",
"edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize,",
"akibasize = [] akibatime = [] nearlinearsize = [] nearlineartime",
"graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\",",
"ourparallel, akiba, nearLinear, linearTime] # data = [oursequential, ourparallel, akiba,",
"ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\",",
"= [oursequential, ourparallel, akiba, nearLinear] data = filter(lambda x :",
"akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\",",
"if nls > 0 and nlt > 0: nearlinearsize.append(nls) nearlineartime.append(nlt)",
"ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\") # ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\",",
"\"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\"",
"dict() result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"] return",
"lineartimetime.append(ltt) # print(\"We\") # print(oursizeSequential) # print(ourtimeSequential) # print(\"We (parallel)\")",
"getAkibaTimeAndSize(graph)[\"size\"] / minsize akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime if aks",
"ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\",",
"oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\") # ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\",",
"print(graph + \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"] /",
"and otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"] /",
"def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)",
"ourTimeDir) result = dict() result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"]",
"osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize # print(graph + \"(parallel): \"",
"edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time / VCSolver time\") plt.ylabel(\"size / VCSolver size\")",
"\"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if",
"\" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if osp",
"> 0: lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\") # print(oursizeSequential) # print(ourtimeSequential)",
"[] akibasize = [] akibatime = [] nearlinearsize = []",
"result def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result",
"getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def",
"edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time /",
"nearLinear, linearTime] # data = [oursequential, ourparallel, akiba, nearLinear] data",
"print(nearlineartime) # print(\"LinearTime\") # print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14) fig",
"# print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14) fig = plt.figure(figsize=(3.2, 2.4))",
"import get_data_ours import get_data_akiba import get_data_NearLinear import get_data_LinearTime import os",
"= [] oursizeParallel = [] ourtimeParallel = [] akibasize =",
"\"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\",",
"akiba = getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data",
"(parallel)\") # print(oursizeParallel) # print(ourtimeParallel) # print(\"Akiba\") # print(akibasize) #",
"label=\"ParFastKer\", marker=\"+\", color=\"black\") # ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\")",
"marker=\"+\", color=\"black\") # ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime,",
"# print(\"NearLinear\") # print(nearlinearsize) # print(nearlineartime) # print(\"LinearTime\") # print(lineartimesize)",
"[oursequential, ourparallel, akiba, nearLinear] data = filter(lambda x : x",
"nls > 0 and nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt) #",
"plt.ylabel(\"size / VCSolver size\") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower",
"plt.rc('font', size=14) fig = plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1) plt.title(\"Summary\",",
"if aks > 0 and akt > 0: akibasize.append(aks) akibatime.append(akt)",
"= dict() result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"]",
"minsize # print(graph + \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp =",
"result[\"size\"] = res[\"parallel_quasikernel_size\"] return result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)",
"linearTimeDir, partitioningDir, ourTimeDir) result = dict() result[\"time\"] = res[\"sequential_quasikernel_time\"] +",
"0: return 1 return minimum oursizeSequential = [] ourtimeSequential =",
"# print(oursizeParallel) # print(ourtimeParallel) # print(\"Akiba\") # print(akibasize) # print(akibatime)",
"+ res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"] return result def getAkibaTimeAndSize(graph): return",
"facecolors=\"none\") plt.xlabel(\"time / VCSolver time\") plt.ylabel(\"size / VCSolver size\") plt.xticks([0.0001,",
"plt.xlabel(\"time / VCSolver time\") plt.ylabel(\"size / VCSolver size\") plt.xticks([0.0001, 0.01,",
"/ mintime if oss > 0 and ots > 0:",
"> 0 and otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks =",
"\"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir",
"aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime",
"0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize akt =",
"0 and otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"]",
"otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if osp > 0 and",
"str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if oss > 0",
"minimum oursizeSequential = [] ourtimeSequential = [] oursizeParallel = []",
"mintime if nls > 0 and nlt > 0: lineartimesize.append(lts)",
"mintime if nls > 0 and nlt > 0: nearlinearsize.append(nls)",
"and nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\") # print(oursizeSequential)",
"getOurTimeAndSizeParallel(graph)[\"size\"] / minsize # print(graph + \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"]))",
"and nlt > 0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"] /",
"= \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)",
"def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)",
"\"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\",",
"0 and ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"]",
"result = dict() result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"] =",
"/ minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime if nls >",
"and akt > 0: akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"] /",
"getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel, akiba, nearLinear,",
"\"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\",",
"data) minimum = min(data) if minimum == 0: return 1",
"partitioningDir, ourTimeDir) result = dict() result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"]",
"nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res =",
"\"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir",
"plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel,",
"getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if nls > 0 and nlt >",
"in graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"] oss =",
"nlt > 0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize",
"res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"] return result",
"= getOurTimeAndSizeSequential(graph)[\"size\"] / minsize # print(graph + \"(sequential): \" +",
"if osp > 0 and otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp)",
"0: akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt =",
"print(oursizeParallel) # print(ourtimeParallel) # print(\"Akiba\") # print(akibasize) # print(akibatime) #",
": x >= 0, data) minimum = min(data) if minimum",
"= res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"] return",
"result = dict() result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"]",
"label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\")",
"akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime if aks > 0 and",
"fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel,",
"ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize akt = getAkibaTimeAndSize(graph)[\"time\"] /",
"label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\") # ax.scatter(akibatime,",
"= getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear =",
"= [] akibatime = [] nearlinearsize = [] nearlineartime =",
"mintime = getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize # print(graph",
"# print(\"Akiba\") # print(akibasize) # print(akibatime) # print(\"NearLinear\") # print(nearlinearsize)",
"nearlinearsize.append(nls) nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"]",
"= getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if nls > 0 and nlt",
"= getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if",
"x >= 0, data) minimum = min(data) if minimum ==",
"facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time / VCSolver",
"filter(lambda x : x >= 0, data) minimum = min(data)",
"minimum = min(data) if minimum == 0: return 1 return",
"color=\"black\") # ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize,",
"ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if oss > 0 and",
"mintime if oss > 0 and ots > 0: oursizeSequential.append(oss)",
"minsize akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime if aks > 0",
"return result def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)",
"[] oursizeParallel = [] ourtimeParallel = [] akibasize = []",
"getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop]",
"= [] nearlinearsize = [] nearlineartime = [] lineartimesize =",
"size=14) fig = plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14)",
"import get_data_LinearTime import os import matplotlib.pyplot as plt # graphs",
"partitioningDir, ourTimeDir) result = dict() result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"]",
"> 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize #",
"[] ourtimeParallel = [] akibasize = [] akibatime = []",
"res[\"parallel_quasikernel_size\"] return result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph):",
"graph in graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"] oss",
"0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode=\"expand\") plt.savefig(\"summaryplot_vcsolver_baseline.pdf\",",
"# print(\"We (parallel)\") # print(oursizeParallel) # print(ourtimeParallel) # print(\"Akiba\") #",
"nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel,",
"/ VCSolver size\") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center',",
"linearTimeDir) def minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop]",
"data = filter(lambda x : x >= 0, data) minimum",
"print(lineartimetime) plt.rc('font', size=14) fig = plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1)",
"0, data) minimum = min(data) if minimum == 0: return",
"\"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\"",
"res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"] return result def getOurTimeAndSizeParallel(graph): res =",
"return result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return",
"osp > 0 and otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks",
"# data = [oursequential, ourparallel, akiba, nearLinear] data = filter(lambda",
"[\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\",",
"oursizeParallel = [] ourtimeParallel = [] akibasize = [] akibatime",
"facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\",",
"= [] for graph in graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime",
"getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if osp > 0 and otp >",
"plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential,",
"\"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs = [\"uk-2002\", \"arabic-2005\",",
"0 and akt > 0: akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"]",
"[] ourtimeSequential = [] oursizeParallel = [] ourtimeParallel = []",
"\"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result",
"lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\") # print(oursizeSequential) # print(ourtimeSequential) # print(\"We",
"marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time",
"oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize # print(graph +",
"def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result =",
"if minimum == 0: return 1 return minimum oursizeSequential =",
"= \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph,",
"get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel =",
"[] lineartimetime = [] for graph in graphs: minsize =",
"0: lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\") # print(oursizeSequential) # print(ourtimeSequential) #",
"= getOurTimeAndSizeParallel(graph)[\"size\"] / minsize # print(graph + \"(parallel): \" +",
"print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14) fig = plt.figure(figsize=(3.2, 2.4)) ax",
"result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph,",
"getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if nls",
"def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result =",
"= getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data =",
"data = [oursequential, ourparallel, akiba, nearLinear] data = filter(lambda x",
"/ minsize # print(graph + \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp",
"\"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs = [\"uk-2002\",",
"> 0: akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt",
"lineartimesize = [] lineartimetime = [] for graph in graphs:",
"\"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir =",
"ourtimeSequential = [] oursizeParallel = [] ourtimeParallel = [] akibasize",
"akiba, nearLinear] data = filter(lambda x : x >= 0,",
"result[\"size\"] = res[\"sequential_quasikernel_size\"] return result def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph,",
"# print(ourtimeParallel) # print(\"Akiba\") # print(akibasize) # print(akibatime) # print(\"NearLinear\")",
"getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel, akiba, nearLinear, linearTime] # data",
"[] lineartimesize = [] lineartimetime = [] for graph in",
"> 0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt",
"= min(data) if minimum == 0: return 1 return minimum",
"minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"] /",
"getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop]",
"= getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel, akiba,",
"0 and nlt > 0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"]",
"get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return",
"= getAkibaTimeAndSize(graph)[\"time\"] / mintime if aks > 0 and akt",
"= [] lineartimesize = [] lineartimetime = [] for graph",
"lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime",
"print(nearlinearsize) # print(nearlineartime) # print(\"LinearTime\") # print(lineartimesize) # print(lineartimetime) plt.rc('font',",
"if oss > 0 and ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots)",
"ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\",",
"def minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba",
"linearTimeDir, partitioningDir, ourTimeDir) result = dict() result[\"time\"] = res[\"parallel_quasikernel_time\"] +",
"[] nearlineartime = [] lineartimesize = [] lineartimetime = []",
"print(ourtimeSequential) # print(\"We (parallel)\") # print(oursizeParallel) # print(ourtimeParallel) # print(\"Akiba\")",
"return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel",
"import matplotlib.pyplot as plt # graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\",",
"# print(\"LinearTime\") # print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14) fig =",
"# print(graph + \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"]",
"return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph,",
"ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize # print(graph + \"(parallel):",
"minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba =",
"def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop): oursequential =",
"= getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime if",
"get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict() result[\"time\"] = res[\"sequential_quasikernel_time\"]",
"marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime,",
"label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\")",
"0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt =",
"+ str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if oss >",
"linearTime] # data = [oursequential, ourparallel, akiba, nearLinear] data =",
"ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime",
"ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if nls > 0 and",
"\"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir,",
"print(\"We (parallel)\") # print(oursizeParallel) # print(ourtimeParallel) # print(\"Akiba\") # print(akibasize)",
"get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict() result[\"time\"] = res[\"parallel_quasikernel_time\"]",
"ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode=\"expand\") plt.savefig(\"summaryplot_vcsolver_baseline.pdf\", bbox_inches=\"tight\") # plt.show()",
"ourtimeParallel = [] akibasize = [] akibatime = [] nearlinearsize",
"print(\"NearLinear\") # print(nearlinearsize) # print(nearlineartime) # print(\"LinearTime\") # print(lineartimesize) #",
"prop): oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop]",
"= res[\"parallel_quasikernel_size\"] return result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def",
"\"delaunay_n24\", \"del26\"] graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\",",
"lineartimetime = [] for graph in graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"]",
"nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\") # print(oursizeSequential) #",
"getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def",
"getNearLinearTimeAndSize(graph)[\"time\"] / mintime if nls > 0 and nlt >",
"return minimum oursizeSequential = [] ourtimeSequential = [] oursizeParallel =",
"res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"] return result def getAkibaTimeAndSize(graph): return get_data_akiba.getAkibaTimeAndSize(graph,",
"getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize # print(graph + \"(sequential):",
"plt # graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\",",
"res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict() result[\"time\"]",
"/ mintime if osp > 0 and otp > 0:",
"+ res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"] return result def getOurTimeAndSizeParallel(graph): res",
"mintime if osp > 0 and otp > 0: oursizeParallel.append(osp)",
"time\") plt.ylabel(\"size / VCSolver size\") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2,",
"# graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\",",
"data = [oursequential, ourparallel, akiba, nearLinear, linearTime] # data =",
"color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\") # ax.scatter(akibatime, akibasize, label=\"VCSolver\",",
"\"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if",
"print(\"LinearTime\") # print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14) fig = plt.figure(figsize=(3.2,",
"marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\") # ax.scatter(akibatime, akibasize,",
"oursizeSequential = [] ourtimeSequential = [] oursizeParallel = [] ourtimeParallel",
"oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize akt = getAkibaTimeAndSize(graph)[\"time\"]",
"> 0 and akt > 0: akibasize.append(aks) akibatime.append(akt) nls =",
"VCSolver size\") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False,",
"print(akibatime) # print(\"NearLinear\") # print(nearlinearsize) # print(nearlineartime) # print(\"LinearTime\") #",
"= getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if osp > 0 and otp",
"ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode=\"expand\") plt.savefig(\"summaryplot_vcsolver_baseline.pdf\", bbox_inches=\"tight\") #",
"2.4)) ax = fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential,",
"import get_data_NearLinear import get_data_LinearTime import os import matplotlib.pyplot as plt",
"# print(graph + \"(parallel): \" + str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"]",
"# print(akibasize) # print(akibatime) # print(\"NearLinear\") # print(nearlinearsize) # print(nearlineartime)",
"if nls > 0 and nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt)",
"nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime if nls > 0 and",
"ourTimeDir) result = dict() result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] +",
"> 0 and nlt > 0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts =",
"plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode=\"expand\")",
"minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if nls > 0",
"/ mintime if aks > 0 and akt > 0:",
"/ mintime if nls > 0 and nlt > 0:",
"= \"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir =",
"partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir",
"print(\"We\") # print(oursizeSequential) # print(ourtimeSequential) # print(\"We (parallel)\") # print(oursizeParallel)",
"as plt # graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\",",
"getOurTimeAndSizeSequential(graph)[\"size\"] / minsize # print(graph + \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"]))",
"get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop):",
"nearlineartime = [] lineartimesize = [] lineartimetime = [] for",
"getAkibaTimeAndSize(graph)[\"time\"] / mintime if aks > 0 and akt >",
"nearlineartime.append(nlt) lts = getLinearTimeTimeAndSize(graph)[\"size\"] / minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"] /",
"== 0: return 1 return minimum oursizeSequential = [] ourtimeSequential",
"= getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize",
"ax = fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\",",
"\"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\",",
"# print(ourtimeSequential) # print(\"We (parallel)\") # print(oursizeParallel) # print(ourtimeParallel) #",
"# print(nearlinearsize) # print(nearlineartime) # print(\"LinearTime\") # print(lineartimesize) # print(lineartimetime)",
"\"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\",",
"nls > 0 and nlt > 0: nearlinearsize.append(nls) nearlineartime.append(nlt) lts",
"oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\") #",
"# print(akibatime) # print(\"NearLinear\") # print(nearlinearsize) # print(nearlineartime) # print(\"LinearTime\")",
"= res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"] return result def",
"/ minsize # print(graph + \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots",
"res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"] return result def getOurTimeAndSizeParallel(graph):",
"\"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"]",
"akibatime = [] nearlinearsize = [] nearlineartime = [] lineartimesize",
"getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict()",
"ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\", color=\"black\")",
"print(graph + \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"] /",
"[] akibatime = [] nearlinearsize = [] nearlineartime = []",
"akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"] /",
"marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time / VCSolver time\") plt.ylabel(\"size / VCSolver",
"fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\")",
"get_data_NearLinear import get_data_LinearTime import os import matplotlib.pyplot as plt #",
"/ VCSolver time\") plt.ylabel(\"size / VCSolver size\") plt.xticks([0.0001, 0.01, 1])",
"\"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"]",
"get_data_ours import get_data_akiba import get_data_NearLinear import get_data_LinearTime import os import",
"nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime",
"+ \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime",
"= [] lineartimetime = [] for graph in graphs: minsize",
"= [oursequential, ourparallel, akiba, nearLinear, linearTime] # data = [oursequential,",
"result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"]",
"oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize # print(graph + \"(sequential): \"",
"\"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir",
"nearLinear] data = filter(lambda x : x >= 0, data)",
"# ax.scatter(akibatime, akibasize, label=\"VCSolver\", marker=\"^\", edgecolors=\"blue\", facecolors=\"none\") ax.scatter(nearlineartime, nearlinearsize, label=\"NearLinear\",",
"otp > 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize",
"VCSolver time\") plt.ylabel(\"size / VCSolver size\") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7),",
"\"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res",
"fig = plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\")",
"= get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict() result[\"time\"] =",
"\"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\"",
"= [] nearlineartime = [] lineartimesize = [] lineartimetime =",
"= [] akibasize = [] akibatime = [] nearlinearsize =",
"\"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\",",
"/ minsize akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime if aks >",
"size\") plt.xticks([0.0001, 0.01, 1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0.,",
"\"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs =",
"aks > 0 and akt > 0: akibasize.append(aks) akibatime.append(akt) nls",
"akiba, nearLinear, linearTime] # data = [oursequential, ourparallel, akiba, nearLinear]",
">= 0, data) minimum = min(data) if minimum == 0:",
"\"delaunay_n24\", \"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir =",
"linearTime = getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel, akiba, nearLinear, linearTime]",
"= getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if oss > 0 and ots",
"result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"] return result",
"= [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\",",
"# print(\"We\") # print(oursizeSequential) # print(ourtimeSequential) # print(\"We (parallel)\") #",
"= [] ourtimeSequential = [] oursizeParallel = [] ourtimeParallel =",
"+ str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if osp >",
"akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"]",
"nearlinearsize, label=\"NearLinear\", marker=\"o\", edgecolors=\"red\", facecolors=\"none\") ax.scatter(lineartimetime, lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\",",
"return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph):",
"[] for graph in graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime =",
"minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime if nls > 0",
"getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if oss > 0 and ots >",
"for graph in graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"]",
"nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop): oursequential",
"label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time / VCSolver time\") plt.ylabel(\"size /",
"getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize #",
"> 0 and nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\")",
"minsize # print(graph + \"(sequential): \" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots =",
"oss > 0 and ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp",
"= \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir =",
"matplotlib.pyplot as plt # graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\",",
"= [] ourtimeParallel = [] akibasize = [] akibatime =",
"= getAkibaTimeAndSize(graph)[\"size\"] / minsize akt = getAkibaTimeAndSize(graph)[\"time\"] / mintime if",
"+ res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"] return result def",
"str(getOurTimeAndSizeParallel(graph)[\"size\"])) otp = getOurTimeAndSizeParallel(graph)[\"time\"] / mintime if osp > 0",
"import os import matplotlib.pyplot as plt # graphs = [\"uk-2002\",",
"dict() result[\"time\"] = res[\"parallel_quasikernel_time\"] + res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"] =",
"> 0: oursizeParallel.append(osp) ourtimeParallel.append(otp) aks = getAkibaTimeAndSize(graph)[\"size\"] / minsize akt",
"\"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\"",
"graphs: minsize = getAkibaTimeAndSize(graph)[\"size\"] mintime = getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"]",
"1 return minimum oursizeSequential = [] ourtimeSequential = [] oursizeParallel",
"[oursequential, ourparallel, akiba, nearLinear, linearTime] # data = [oursequential, ourparallel,",
"1]) ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode=\"expand\") plt.savefig(\"summaryplot_vcsolver_baseline.pdf\", bbox_inches=\"tight\")",
"0 and nlt > 0: lineartimesize.append(lts) lineartimetime.append(ltt) # print(\"We\") #",
"oursequential = getOurTimeAndSizeSequential(graph)[prop] ourparallel = getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear",
"res[\"lineartime_time\"] + res[\"partitioning_time\"] result[\"size\"] = res[\"parallel_quasikernel_size\"] return result def getAkibaTimeAndSize(graph):",
"\"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\",",
"= getAkibaTimeAndSize(graph)[\"time\"] oss = getOurTimeAndSizeSequential(graph)[\"size\"] / minsize # print(graph +",
"/ minsize ltt = getLinearTimeTimeAndSize(graph)[\"time\"] / mintime if nls >",
"\"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\"",
"import get_data_akiba import get_data_NearLinear import get_data_LinearTime import os import matplotlib.pyplot",
"x : x >= 0, data) minimum = min(data) if",
"min(data) if minimum == 0: return 1 return minimum oursizeSequential",
"print(akibasize) # print(akibatime) # print(\"NearLinear\") # print(nearlinearsize) # print(nearlineartime) #",
"os import matplotlib.pyplot as plt # graphs = [\"uk-2002\", \"arabic-2005\",",
"ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize",
"and ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"] /",
"= filter(lambda x : x >= 0, data) minimum =",
"print(ourtimeParallel) # print(\"Akiba\") # print(akibasize) # print(akibatime) # print(\"NearLinear\") #",
"linearTimeDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs\" partitioningDir = \"../../LinearTimeKernels/partitions\" ourTimeDir = \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir",
"get_data_LinearTime import os import matplotlib.pyplot as plt # graphs =",
"= plt.figure(figsize=(3.2, 2.4)) ax = fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\")",
"akt > 0: akibasize.append(aks) akibatime.append(akt) nls = getNearLinearTimeAndSize(graph)[\"size\"] / minsize",
"res[\"sequential_quasikernel_size\"] return result def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir,",
"# print(oursizeSequential) # print(ourtimeSequential) # print(\"We (parallel)\") # print(oursizeParallel) #",
"# print(lineartimetime) plt.rc('font', size=14) fig = plt.figure(figsize=(3.2, 2.4)) ax =",
"# print(nearlineartime) # print(\"LinearTime\") # print(lineartimesize) # print(lineartimetime) plt.rc('font', size=14)",
"= getLinearTimeTimeAndSize(graph)[prop] data = [oursequential, ourparallel, akiba, nearLinear, linearTime] #",
"getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir) def minProperty(graph, prop): oursequential = getOurTimeAndSizeSequential(graph)[prop]",
"getNearLinearTimeAndSize(graph)[\"size\"] / minsize nlt = getNearLinearTimeAndSize(graph)[\"time\"] / mintime if nls",
"\"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"delaunay_n24\", \"del26\"] linearTimeDir =",
"\" + str(getOurTimeAndSizeSequential(graph)[\"size\"])) ots = getOurTimeAndSizeSequential(graph)[\"time\"] / mintime if oss",
"akibaDir = \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir,",
"= getOurTimeAndSizeParallel(graph)[prop] akiba = getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime =",
"ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\", color=\"green\") ax.scatter(ourtimeParallel, oursizeParallel, label=\"ParFastKer\", marker=\"+\",",
"\"del26\"] graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\", \"sk-2005\", \"uk-2007-05\",",
"ourparallel, akiba, nearLinear] data = filter(lambda x : x >=",
"return 1 return minimum oursizeSequential = [] ourtimeSequential = []",
"nearlinearsize = [] nearlineartime = [] lineartimesize = [] lineartimetime",
"= dict() result[\"time\"] = res[\"sequential_quasikernel_time\"] + res[\"lineartime_time\"] result[\"size\"] = res[\"sequential_quasikernel_size\"]",
"mintime if aks > 0 and akt > 0: akibasize.append(aks)",
"0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp = getOurTimeAndSizeParallel(graph)[\"size\"] / minsize # print(graph",
"getAkibaTimeAndSize(graph)[prop] nearLinear = getNearLinearTimeAndSize(graph)[prop] linearTime = getLinearTimeTimeAndSize(graph)[prop] data = [oursequential,",
"get_data_akiba import get_data_NearLinear import get_data_LinearTime import os import matplotlib.pyplot as",
"minimum == 0: return 1 return minimum oursizeSequential = []",
"= res[\"sequential_quasikernel_size\"] return result def getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir,",
"[] nearlinearsize = [] nearlineartime = [] lineartimesize = []",
"lineartimesize, label=\"LinearTime\", marker=\"^\", edgecolors=\"magenta\", facecolors=\"none\") plt.xlabel(\"time / VCSolver time\") plt.ylabel(\"size",
"> 0 and ots > 0: oursizeSequential.append(oss) ourtimeSequential.append(ots) osp =",
"print(oursizeSequential) # print(ourtimeSequential) # print(\"We (parallel)\") # print(oursizeParallel) # print(ourtimeParallel)",
"\"uk-2007-05\", \"webbase-2001\", \"asia.osm\", \"road_usa\", \"europe.osm\", \"rgg_n26_s0\", \"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs",
"akibaDir) def getNearLinearTimeAndSize(graph): return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir) def getLinearTimeTimeAndSize(graph): return get_data_LinearTime.getLinearTimeTimeAndSize(graph,",
"= getNearLinearTimeAndSize(graph)[\"time\"] / mintime if nls > 0 and nlt",
"= \"../../results/LinearTimeKernelsScalingAll\" nearLinearDir = \"../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear\" akibaDir = \"../../akiba_vertex_cover/results\" def getOurTimeAndSizeSequential(graph):",
"print(\"Akiba\") # print(akibasize) # print(akibatime) # print(\"NearLinear\") # print(nearlinearsize) #",
"getOurTimeAndSizeParallel(graph): res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir) result = dict()",
"= fig.add_subplot(1,1,1) plt.title(\"Summary\", fontsize=14) ax.set_yscale(\"log\") ax.set_xscale(\"log\") ax.scatter(ourtimeSequential, oursizeSequential, label=\"FastKer\", marker=\"x\",",
"\"RHG-100000000-nodes-2000000000-edges\", \"delaunay_n24\", \"del26\"] graphs = [\"uk-2002\", \"arabic-2005\", \"gsh-2015-tpd\", \"uk-2005\", \"it-2004\","
] |
[
"class SubParser(HelpMixin): def __init__(self, commands): self.commands = self._commands(commands) def _commands(self,",
"= [] self.commands = self._methods_with_opts() def _methods_with_opts(self): result = {}",
"method_args = {} # Get the expected method arguments, ignore",
"in method.options + self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def _call_method(self,",
"method.options = [] return method class HelpMixin(object): def help(self): print('available",
"value in self_args.items(): setattr(self, name, value) self.pre_command() return method(**method_args) def",
"description) return 1 class SubParser(HelpMixin): def __init__(self, commands): self.commands =",
"sys import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter def opt(*args,",
"import ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs): def decorator(method): if not",
"method return decorator def noopts(method): method.options = [] return method",
"inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method, args) method_args = {} #",
"**kwargs): self.global_options.append((args, kwargs)) def pre_command(self): pass class BaseCommand(Command): def __init__(self):",
"self.commands = self._commands(commands) def _commands(self, commands): prog = sys.argv[0] result",
"in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args) return self.help() def opt(self,",
"'').strip('\\n') print(' ', name.ljust(10), description) return 1 class SubParser(HelpMixin): def",
"result = {} for name in dir(self): if name.startswith('__'): continue",
"out which arguments the method expects expected_args, _, _, _",
"*args, **kwargs): self.global_options.append((args, kwargs)) def pre_command(self): pass class BaseCommand(Command): def",
"in self_args.items(): setattr(self, name, value) self.pre_command() return method(**method_args) def __call__(self,",
"name, command in self.commands.items(): description = str(command.__doc__ or '').strip('\\n') print('",
"class Command(HelpMixin): def __init__(self): self.global_options = [] self.commands = self._methods_with_opts()",
"return vars(parser.parse_args(args)) def _call_method(self, method, args): # Find out which",
"formatter_class=RawDescriptionHelpFormatter ) for opt in method.options + self.global_options: parser.add_argument(*opt[0], **opt[1])",
"parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def _call_method(self, method, args): # Find",
"', name.ljust(10), description) return 1 class SubParser(HelpMixin): def __init__(self, commands):",
"commands: name = getattr(cmd, '_name', None) if not name: continue",
"self.commands[arg](args) return self.help() class Command(HelpMixin): def __init__(self): self.global_options = []",
"Command(HelpMixin): def __init__(self): self.global_options = [] self.commands = self._methods_with_opts() def",
"expected_args, _, _, _ = inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method,",
"setattr(self, name, value) self.pre_command() return method(**method_args) def __call__(self, args): for",
"or ''), formatter_class=RawDescriptionHelpFormatter ) for opt in method.options + self.global_options:",
"in enumerate(args): if arg in self.commands.keys(): args.pop(index) return self.commands[arg](args) return",
"= inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method, args) method_args = {}",
"= self._methods_with_opts() def _methods_with_opts(self): result = {} for name in",
"'options'): continue result[name] = method return result def _parse_args(self, method,",
"self_args = self._parse_args(method, args) method_args = {} # Get the",
"self_args.items(): setattr(self, name, value) self.pre_command() return method(**method_args) def __call__(self, args):",
"for name in dir(self): if name.startswith('__'): continue method = getattr(self,",
"result[name] = method return result def _parse_args(self, method, args): prog",
"if not hasattr(method, 'options'): continue result[name] = method return result",
"the method expects expected_args, _, _, _ = inspect.getargspec(method) expected_args.remove('self')",
"Put rest of the arguments in self for name, value",
"commands:') for name, command in self.commands.items(): description = str(command.__doc__ or",
"= [] return method class HelpMixin(object): def help(self): print('available commands:')",
"sys.argv[0] result = {} for cmd in commands: name =",
"expected method arguments, ignore rest for name in expected_args: if",
"return self._call_method(self.commands[arg], args) return self.help() def opt(self, *args, **kwargs): self.global_options.append((args,",
"return method return decorator def noopts(method): method.options = [] return",
"name in args: method_args[name] = args.pop(name) # Put rest of",
"return self.help() def opt(self, *args, **kwargs): self.global_options.append((args, kwargs)) def pre_command(self):",
"method(**method_args) def __call__(self, args): for index, arg in enumerate(args): if",
"arg in enumerate(args): if arg in self.commands.keys(): args.pop(index) return self.commands[arg](args)",
"self.commands.keys(): args.pop(index) return self.commands[arg](args) return self.help() class Command(HelpMixin): def __init__(self):",
"method arguments, ignore rest for name in expected_args: if name",
"{} for name in dir(self): if name.startswith('__'): continue method =",
"def _parse_args(self, method, args): prog = '{} {} {}'.format(self.prog, self._name,",
"in args: method_args[name] = args.pop(name) # Put rest of the",
"rest of the arguments in self for name, value in",
"dest='config_path', help='Configuration file', default='~/.test.conf' ) def pre_command(self): config = configparser.ConfigParser()",
"description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter ) for opt in method.options +",
"self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def _call_method(self, method, args): #",
"def __init__(self, commands): self.commands = self._commands(commands) def _commands(self, commands): prog",
"_parse_args(self, method, args): prog = '{} {} {}'.format(self.prog, self._name, method.__name__)",
"def run(self): args = sys.argv[1:] for index, arg in enumerate(args):",
"the arguments in self for name, value in self_args.items(): setattr(self,",
"prog = sys.argv[0] result = {} for cmd in commands:",
"hasattr(method, 'options'): continue result[name] = method return result def _parse_args(self,",
"self._methods_with_opts() def _methods_with_opts(self): result = {} for name in dir(self):",
"= prog result[name] = cmd return result def run(self): args",
"cmd.prog = prog result[name] = cmd return result def run(self):",
"= {} for cmd in commands: name = getattr(cmd, '_name',",
"ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs): def decorator(method): if not hasattr(method,",
"self.global_options = [] self.commands = self._methods_with_opts() def _methods_with_opts(self): result =",
"self._parse_args(method, args) method_args = {} # Get the expected method",
"arguments in self for name, value in self_args.items(): setattr(self, name,",
"class BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path', help='Configuration",
"SubParser(HelpMixin): def __init__(self, commands): self.commands = self._commands(commands) def _commands(self, commands):",
"name in expected_args: if name in args: method_args[name] = args.pop(name)",
"the expected method arguments, ignore rest for name in expected_args:",
"kwargs)) return method return decorator def noopts(method): method.options = []",
"name.startswith('__'): continue method = getattr(self, name) if not hasattr(method, 'options'):",
"arguments the method expects expected_args, _, _, _ = inspect.getargspec(method)",
"= [] method.options.append((args, kwargs)) return method return decorator def noopts(method):",
"self.help() class Command(HelpMixin): def __init__(self): self.global_options = [] self.commands =",
"return self.help() class Command(HelpMixin): def __init__(self): self.global_options = [] self.commands",
"= getattr(self, name) if not hasattr(method, 'options'): continue result[name] =",
"[] method.options.append((args, kwargs)) return method return decorator def noopts(method): method.options",
"{} for cmd in commands: name = getattr(cmd, '_name', None)",
"prog result[name] = cmd return result def run(self): args =",
"which arguments the method expects expected_args, _, _, _ =",
"decorator(method): if not hasattr(method, 'options'): method.options = [] method.options.append((args, kwargs))",
"Get the expected method arguments, ignore rest for name in",
"_ = inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method, args) method_args =",
"self._call_method(self.commands[arg], args) return self.help() def opt(self, *args, **kwargs): self.global_options.append((args, kwargs))",
"+ self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def _call_method(self, method, args):",
"parser = ArgumentParser( prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter ) for",
"if not hasattr(method, 'options'): method.options = [] method.options.append((args, kwargs)) return",
"not name: continue cmd.prog = prog result[name] = cmd return",
"result = {} for cmd in commands: name = getattr(cmd,",
"getattr(self, name) if not hasattr(method, 'options'): continue result[name] = method",
"method.options + self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def _call_method(self, method,",
"self for name, value in self_args.items(): setattr(self, name, value) self.pre_command()",
"in expected_args: if name in args: method_args[name] = args.pop(name) #",
"self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args) return self.help() def opt(self, *args,",
"def decorator(method): if not hasattr(method, 'options'): method.options = [] method.options.append((args,",
"# Put rest of the arguments in self for name,",
"return self.commands[arg](args) return self.help() class Command(HelpMixin): def __init__(self): self.global_options =",
"arg in self.commands.keys(): args.pop(index) return self.commands[arg](args) return self.help() class Command(HelpMixin):",
"__init__(self): self.global_options = [] self.commands = self._methods_with_opts() def _methods_with_opts(self): result",
"for name, command in self.commands.items(): description = str(command.__doc__ or '').strip('\\n')",
"import sys import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter def",
"args): for index, arg in enumerate(args): if arg in self.commands.keys():",
"= '{} {} {}'.format(self.prog, self._name, method.__name__) parser = ArgumentParser( prog=prog,",
"# Get the expected method arguments, ignore rest for name",
"opt(*args, **kwargs): def decorator(method): if not hasattr(method, 'options'): method.options =",
"str(command.__doc__ or '').strip('\\n') print(' ', name.ljust(10), description) return 1 class",
"self._name, method.__name__) parser = ArgumentParser( prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter",
"enumerate(args): if arg in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args) return",
"file', default='~/.test.conf' ) def pre_command(self): config = configparser.ConfigParser() config.read(self.config_path) print(config.sections())",
"self._commands(commands) def _commands(self, commands): prog = sys.argv[0] result = {}",
"args: method_args[name] = args.pop(name) # Put rest of the arguments",
"expects expected_args, _, _, _ = inspect.getargspec(method) expected_args.remove('self') self_args =",
"_methods_with_opts(self): result = {} for name in dir(self): if name.startswith('__'):",
"help(self): print('available commands:') for name, command in self.commands.items(): description =",
"in self.commands.items(): description = str(command.__doc__ or '').strip('\\n') print(' ', name.ljust(10),",
"import configparser import sys import inspect from argparse import ArgumentParser,",
"= sys.argv[0] result = {} for cmd in commands: name",
"return decorator def noopts(method): method.options = [] return method class",
"return result def run(self): args = sys.argv[1:] for index, arg",
"'{} {} {}'.format(self.prog, self._name, method.__name__) parser = ArgumentParser( prog=prog, description=(method.__doc__",
"args): # Find out which arguments the method expects expected_args,",
"sys.argv[1:] for index, arg in enumerate(args): if arg in self.commands.keys():",
"def _commands(self, commands): prog = sys.argv[0] result = {} for",
"command in self.commands.items(): description = str(command.__doc__ or '').strip('\\n') print(' ',",
"__init__(self): super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path', help='Configuration file', default='~/.test.conf' )",
"continue cmd.prog = prog result[name] = cmd return result def",
"**kwargs): def decorator(method): if not hasattr(method, 'options'): method.options = []",
"method class HelpMixin(object): def help(self): print('available commands:') for name, command",
"prog = '{} {} {}'.format(self.prog, self._name, method.__name__) parser = ArgumentParser(",
"= {} # Get the expected method arguments, ignore rest",
"noopts(method): method.options = [] return method class HelpMixin(object): def help(self):",
"commands): self.commands = self._commands(commands) def _commands(self, commands): prog = sys.argv[0]",
"for name in expected_args: if name in args: method_args[name] =",
"super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path', help='Configuration file', default='~/.test.conf' ) def",
"name) if not hasattr(method, 'options'): continue result[name] = method return",
"opt(self, *args, **kwargs): self.global_options.append((args, kwargs)) def pre_command(self): pass class BaseCommand(Command):",
"args) method_args = {} # Get the expected method arguments,",
"dir(self): if name.startswith('__'): continue method = getattr(self, name) if not",
"{} {}'.format(self.prog, self._name, method.__name__) parser = ArgumentParser( prog=prog, description=(method.__doc__ or",
"# Find out which arguments the method expects expected_args, _,",
"method, args): # Find out which arguments the method expects",
"'options'): method.options = [] method.options.append((args, kwargs)) return method return decorator",
"= self._commands(commands) def _commands(self, commands): prog = sys.argv[0] result =",
"= ArgumentParser( prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter ) for opt",
"print('available commands:') for name, command in self.commands.items(): description = str(command.__doc__",
"arg in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args) return self.help() def",
"method expects expected_args, _, _, _ = inspect.getargspec(method) expected_args.remove('self') self_args",
"self.help() def opt(self, *args, **kwargs): self.global_options.append((args, kwargs)) def pre_command(self): pass",
"self.commands = self._methods_with_opts() def _methods_with_opts(self): result = {} for name",
"of the arguments in self for name, value in self_args.items():",
"{} # Get the expected method arguments, ignore rest for",
"in dir(self): if name.startswith('__'): continue method = getattr(self, name) if",
"def _call_method(self, method, args): # Find out which arguments the",
"rest for name in expected_args: if name in args: method_args[name]",
"'_name', None) if not name: continue cmd.prog = prog result[name]",
"self.pre_command() return method(**method_args) def __call__(self, args): for index, arg in",
"_, _, _ = inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method, args)",
"= cmd return result def run(self): args = sys.argv[1:] for",
"name, value) self.pre_command() return method(**method_args) def __call__(self, args): for index,",
"continue result[name] = method return result def _parse_args(self, method, args):",
"method.options.append((args, kwargs)) return method return decorator def noopts(method): method.options =",
"description = str(command.__doc__ or '').strip('\\n') print(' ', name.ljust(10), description) return",
"<gh_stars>0 import configparser import sys import inspect from argparse import",
"cmd return result def run(self): args = sys.argv[1:] for index,",
"= sys.argv[1:] for index, arg in enumerate(args): if arg in",
"1 class SubParser(HelpMixin): def __init__(self, commands): self.commands = self._commands(commands) def",
"configparser import sys import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter",
"method return result def _parse_args(self, method, args): prog = '{}",
"= self._parse_args(method, args) method_args = {} # Get the expected",
"= str(command.__doc__ or '').strip('\\n') print(' ', name.ljust(10), description) return 1",
"expected_args: if name in args: method_args[name] = args.pop(name) # Put",
"if name in args: method_args[name] = args.pop(name) # Put rest",
"HelpMixin(object): def help(self): print('available commands:') for name, command in self.commands.items():",
"value) self.pre_command() return method(**method_args) def __call__(self, args): for index, arg",
"cmd in commands: name = getattr(cmd, '_name', None) if not",
"result[name] = cmd return result def run(self): args = sys.argv[1:]",
"def _methods_with_opts(self): result = {} for name in dir(self): if",
"__call__(self, args): for index, arg in enumerate(args): if arg in",
"self).__init__() self.opt( '-c', dest='config_path', help='Configuration file', default='~/.test.conf' ) def pre_command(self):",
"for cmd in commands: name = getattr(cmd, '_name', None) if",
"args = sys.argv[1:] for index, arg in enumerate(args): if arg",
"result def run(self): args = sys.argv[1:] for index, arg in",
"expected_args.remove('self') self_args = self._parse_args(method, args) method_args = {} # Get",
"args) return self.help() def opt(self, *args, **kwargs): self.global_options.append((args, kwargs)) def",
"'-c', dest='config_path', help='Configuration file', default='~/.test.conf' ) def pre_command(self): config =",
"[] return method class HelpMixin(object): def help(self): print('available commands:') for",
"Find out which arguments the method expects expected_args, _, _,",
"decorator def noopts(method): method.options = [] return method class HelpMixin(object):",
"if arg in self.commands.keys(): args.pop(index) return self.commands[arg](args) return self.help() class",
"arg in enumerate(args): if arg in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg],",
"def opt(self, *args, **kwargs): self.global_options.append((args, kwargs)) def pre_command(self): pass class",
"_call_method(self, method, args): # Find out which arguments the method",
"BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path', help='Configuration file',",
"name: continue cmd.prog = prog result[name] = cmd return result",
"args.pop(index) return self.commands[arg](args) return self.help() class Command(HelpMixin): def __init__(self): self.global_options",
"name in dir(self): if name.startswith('__'): continue method = getattr(self, name)",
"args): prog = '{} {} {}'.format(self.prog, self._name, method.__name__) parser =",
"def noopts(method): method.options = [] return method class HelpMixin(object): def",
"opt in method.options + self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args)) def",
"from argparse import ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs): def decorator(method):",
"method_args[name] = args.pop(name) # Put rest of the arguments in",
"method.options = [] method.options.append((args, kwargs)) return method return decorator def",
"name, value in self_args.items(): setattr(self, name, value) self.pre_command() return method(**method_args)",
"[] self.commands = self._methods_with_opts() def _methods_with_opts(self): result = {} for",
"return 1 class SubParser(HelpMixin): def __init__(self, commands): self.commands = self._commands(commands)",
"not hasattr(method, 'options'): method.options = [] method.options.append((args, kwargs)) return method",
"if not name: continue cmd.prog = prog result[name] = cmd",
"for opt in method.options + self.global_options: parser.add_argument(*opt[0], **opt[1]) return vars(parser.parse_args(args))",
"for name, value in self_args.items(): setattr(self, name, value) self.pre_command() return",
"return method(**method_args) def __call__(self, args): for index, arg in enumerate(args):",
"ArgumentParser( prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter ) for opt in",
"in self for name, value in self_args.items(): setattr(self, name, value)",
"in enumerate(args): if arg in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args)",
"= {} for name in dir(self): if name.startswith('__'): continue method",
"args.pop(index) return self._call_method(self.commands[arg], args) return self.help() def opt(self, *args, **kwargs):",
"self.global_options.append((args, kwargs)) def pre_command(self): pass class BaseCommand(Command): def __init__(self): super(BaseCommand,",
"in self.commands.keys(): args.pop(index) return self.commands[arg](args) return self.help() class Command(HelpMixin): def",
"args.pop(name) # Put rest of the arguments in self for",
"name = getattr(cmd, '_name', None) if not name: continue cmd.prog",
"def help(self): print('available commands:') for name, command in self.commands.items(): description",
"inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs): def",
"in commands: name = getattr(cmd, '_name', None) if not name:",
"def opt(*args, **kwargs): def decorator(method): if not hasattr(method, 'options'): method.options",
"= method return result def _parse_args(self, method, args): prog =",
"pre_command(self): pass class BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__() self.opt( '-c',",
") for opt in method.options + self.global_options: parser.add_argument(*opt[0], **opt[1]) return",
"__init__(self, commands): self.commands = self._commands(commands) def _commands(self, commands): prog =",
"result def _parse_args(self, method, args): prog = '{} {} {}'.format(self.prog,",
"''), formatter_class=RawDescriptionHelpFormatter ) for opt in method.options + self.global_options: parser.add_argument(*opt[0],",
"run(self): args = sys.argv[1:] for index, arg in enumerate(args): if",
"ignore rest for name in expected_args: if name in args:",
"_commands(self, commands): prog = sys.argv[0] result = {} for cmd",
"def __call__(self, args): for index, arg in enumerate(args): if arg",
"pass class BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path',",
"commands): prog = sys.argv[0] result = {} for cmd in",
"import inspect from argparse import ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs):",
"return result def _parse_args(self, method, args): prog = '{} {}",
"argparse import ArgumentParser, RawDescriptionHelpFormatter def opt(*args, **kwargs): def decorator(method): if",
"def __init__(self): self.global_options = [] self.commands = self._methods_with_opts() def _methods_with_opts(self):",
"= getattr(cmd, '_name', None) if not name: continue cmd.prog =",
"for index, arg in enumerate(args): if arg in self.commands.keys(): args.pop(index)",
"help='Configuration file', default='~/.test.conf' ) def pre_command(self): config = configparser.ConfigParser() config.read(self.config_path)",
"name.ljust(10), description) return 1 class SubParser(HelpMixin): def __init__(self, commands): self.commands",
"= args.pop(name) # Put rest of the arguments in self",
"def pre_command(self): pass class BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__() self.opt(",
"or '').strip('\\n') print(' ', name.ljust(10), description) return 1 class SubParser(HelpMixin):",
"getattr(cmd, '_name', None) if not name: continue cmd.prog = prog",
"_, _ = inspect.getargspec(method) expected_args.remove('self') self_args = self._parse_args(method, args) method_args",
"hasattr(method, 'options'): method.options = [] method.options.append((args, kwargs)) return method return",
"prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter ) for opt in method.options",
"if arg in self.commands.keys(): args.pop(index) return self._call_method(self.commands[arg], args) return self.help()",
"if name.startswith('__'): continue method = getattr(self, name) if not hasattr(method,",
"continue method = getattr(self, name) if not hasattr(method, 'options'): continue",
"method = getattr(self, name) if not hasattr(method, 'options'): continue result[name]",
"def __init__(self): super(BaseCommand, self).__init__() self.opt( '-c', dest='config_path', help='Configuration file', default='~/.test.conf'",
"return method class HelpMixin(object): def help(self): print('available commands:') for name,",
"**opt[1]) return vars(parser.parse_args(args)) def _call_method(self, method, args): # Find out",
"self.opt( '-c', dest='config_path', help='Configuration file', default='~/.test.conf' ) def pre_command(self): config",
"vars(parser.parse_args(args)) def _call_method(self, method, args): # Find out which arguments",
"index, arg in enumerate(args): if arg in self.commands.keys(): args.pop(index) return",
"method.__name__) parser = ArgumentParser( prog=prog, description=(method.__doc__ or ''), formatter_class=RawDescriptionHelpFormatter )",
"{}'.format(self.prog, self._name, method.__name__) parser = ArgumentParser( prog=prog, description=(method.__doc__ or ''),",
"RawDescriptionHelpFormatter def opt(*args, **kwargs): def decorator(method): if not hasattr(method, 'options'):",
"method, args): prog = '{} {} {}'.format(self.prog, self._name, method.__name__) parser",
"kwargs)) def pre_command(self): pass class BaseCommand(Command): def __init__(self): super(BaseCommand, self).__init__()",
"enumerate(args): if arg in self.commands.keys(): args.pop(index) return self.commands[arg](args) return self.help()",
"None) if not name: continue cmd.prog = prog result[name] =",
"arguments, ignore rest for name in expected_args: if name in",
"not hasattr(method, 'options'): continue result[name] = method return result def",
"class HelpMixin(object): def help(self): print('available commands:') for name, command in",
"print(' ', name.ljust(10), description) return 1 class SubParser(HelpMixin): def __init__(self,",
"self.commands.items(): description = str(command.__doc__ or '').strip('\\n') print(' ', name.ljust(10), description)"
] |
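A minimal usage sketch of the command framework above; this is a hedged illustration, not part of the original source. The module name appcmds, the GreetCommand class, its _name value, and the hello/wave methods are all assumed here for demonstration: a subcommand class sets _name, decorates its methods with opt or noopts, and is dispatched through SubParser.

import sys

# Hypothetical usage example -- only BaseCommand, SubParser, opt and noopts
# come from the module above (assumed to be saved as appcmds.py).
from appcmds import BaseCommand, SubParser, opt, noopts


class GreetCommand(BaseCommand):
    """Print a greeting."""
    _name = 'greet'

    @opt('--name', dest='name', default='world', help='Who to greet')
    def hello(self, name):
        # Option values the method does not declare (e.g. config_path)
        # are set on self by _call_method before this runs.
        print('hello, {}'.format(name))

    @noopts
    def wave(self):
        print('*waves*')


if __name__ == '__main__':
    # e.g.  python app.py greet hello --name Alice
    sys.exit(SubParser([GreetCommand()]).run())

With this wiring, the -c global option defined by BaseCommand is accepted by every subcommand, while --name only exists for greet hello.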
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score
import h5py
import time
from ssccoorriinngg import ssccoorriinngg
import numpy as np
from sklearn.model_selection import cross_validate

#%% Picking featureset of interest and apply classification
Object = ssccoorriinngg(filename='', channel='', fs = 200, T = 30)
path = 'C:/PhD/ML
feats = 'featureset'
labels = 'labels'
# Train set
X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels)
# Test set
fname =
X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels)
# Define the scoring criteria:
scoring = {'accuracy' : make_scorer(accuracy_score),
           'precision' : make_scorer(precision_score),
           'recall' : make_scorer(recall_score),
           'f1_score' : make_scorer(f1_score)}
# Cross-validation using logistic Random Forests
y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv = 10)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF)
# Cross-validation using XGBoost
y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000, cv = 10 , max_depth=3, learning_rate=.1)
Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb)

#%% Outcome measures
# Define required metrics here:
Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score']
for metric in Metrics:
    #RF
    r1 = results_RF[metric].mean()
    std1 = results_RF[metric].std()
    print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}')
    # xgb
    r2 = results_xgb[metric].mean()
    std2 = results_xgb[metric].std()
    print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}')
    # SVM
    r3 = results_SVM[metric].mean()
    std3 = results_SVM[metric].std()
    print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}')
    # LR
    r4 = results_LR[metric].mean()
    std4 = results_LR[metric].std()
    print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}')

#%% Applying Randomized grid search to find the best config. of RF
BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y,
        estimator = RandomForestClassifier(), scoring = scoring,
        n_estimators = [int(x) for x in np.arange(10, 500, 20)],
        max_features = ['log2', 'sqrt'],
        max_depth = [int(x) for x in np.arange(10, 100, 30)],
        min_samples_split = [2, 5, 10],
        min_samples_leaf = [1, 2, 4],
        bootstrap = [True, False],
        n_iter = 100, cv = 10)

#%% Test feature selection methods ##
# PCA
PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5)
# Boruta
ranks_Boruta, Feat_selected_Boruta =
# Lasso
Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1)
#ANOVA
Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80)
#### NOW TEST CLASSIFIERS WITH SELECTED FEATS
results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv = 10)

#%% Example save featureset
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3')

#%% Example load features:
X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/',
                            fname = 'feat42_N3_fp2-M1',
                            feats = 'featureset',
                            labels = 'labels')

#%% ... some REM and SWS epochs
Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',
                     ch = 'fp1-M2',
                     N3_fname = 'tr90_N3_fp1-M2_fp2-M1',
                     REM_fname = 'tr90_fp1-M2_fp2-M1',
                     saving = True, fname_save = 'tr90_N3&REM_fp1-M2')

#%% How to save some results?
directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/'
fname = '42feats_N3'
with h5py.File((directory+fname + '.h5'), 'w') as wf:
    # Accuracies
    dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy'])
    dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy'])
    dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy'])
    dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy'])
    # Precision
    dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision'])
    dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision'])
    dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision'])
    dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision'])
    # Recall
    dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall'])
    dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall'])
    dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall'])
    dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall'])
    # f1-score
    dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score'])
    dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score'])
    dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score'])
    dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score'])

#%% Extracting features from more than one channel:
tic = time.time()
########### Central electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
fname_C_N3 = (main_path+"tr90_N3_C3-M2_C4-M1.h5")
fname_C_REM = (main_path+"tr90_REM_C3-M2_C4-M1.h5")
ch_C4 = 'C4-M1'
ch_C3 = 'C3-M2'
Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30)
X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction()
Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM')
Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30)
X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction()
Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM')
Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30)
X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction()
Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3')
Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30)
X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction()
Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3')
########### Occipital electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_O_N3 = (main_path+"tr90_N3_O1-M2_O2-M1.h5")
fname_O_REM = (main_path+"tr90_REM_O1-M2_O2-M1.h5")
ch_O2 = 'O2-M1'
ch_O1 = 'O1-M2'
Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30)
X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction()
Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM')
Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T = 30)
X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction()
Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM')
Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30)
X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction()
Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3')
Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30)
X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction()
Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3')
########### Fp electrodes #############
main_path = "D:/1D_TimeSeries/raw_EEG/without artefact/train_test/"
fname_fp_N3 = (main_path+"tr90_N3_fp1-M2_fp2-M1.h5")
fname_fp_REM = (main_path+"tr90_REM_fp1-M2_fp2-M1.h5")
ch_fp2 = 'fp2-M1'
ch_fp1 = 'fp1-M2'
Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30)
X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction()
Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM')
Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30)
X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction()
Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM')
Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30)
X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction()
Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3')
Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30)
X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction()
Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3')
toc = time.time()
print(f'time taken: {toc - tic}')
########## Concatenate all features #########
# RIGHT hemisphere - REM
X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM))
X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM))
# RIGHT hemisphere - N3
X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3))
X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3))
# LEFT hemisphere - REM
X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM))
X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM))
# LEFT hemisphere - N3
X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3))
X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3))
# Both sides - REM
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Both sides - N3
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
# Combine SWS and REM
X_SWS_REM = np.row_stack((X_N3, X_REM))
y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))
# SAVE ALL COMBINATIONS
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
# one hemisphere
Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM')
Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM')
Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3')
Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3')
# Both hemisphere
Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM')
# Both hemispheres- SWS &REM combination
Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM')

#%% Load features from different brain regions, sleep stage and combine them
Object = ML_Depression(filename='', channel='', fs = 200, T = 30)
path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'
feats = 'featureset'
labels = 'labels'
# Pick right hemisphere N3
fname_rh_N3 = 'feat42_rh_N3'
X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)
# Pick left hemisphere N3
fname_lh_N3 = 'feat42_lh_N3'
X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels)
# Pick right hemisphere REM
fname_rh_REM = 'feat42_rh_REM'
X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels)
# Pick LEFT hemisphere REM
fname_lh_REM = 'feat42_lh_REM'
X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels)
# Combine them
X_N3 = np.column_stack((X_rh_N3, X_lh_N3))
X_REM = np.column_stack((X_rh_REM, X_lh_REM))
# Save combination
Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3')
Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path =
"for metric in Metrics: #RF r1 = results_RF[metric].mean() std1 =",
"X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path,",
"= X_N3 , y=y_fp2_N3 , path = save_path, filename =",
"Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv =",
"save some results? directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' fname = '42feats_N3' with",
"= 7) # Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C =",
"#%% Outcome measures # Defien required metrics here: Metrics =",
"X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3') Object_O2_N3 =",
"2)}+- {round(std2*100, 2)}') # SVM r3 = results_SVM[metric].mean() std3 =",
"30) path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' feats = 'featureset'",
"REM X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM)) X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM)) # LEFT hemisphere",
"k = 80) #Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k",
"= results_SVM['test_recall']) dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall'])",
"= Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename",
"path = path, filename = 'feat42_N3') #%% Example load features:",
"ch_C3, fs = 200, T = 30) X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction()",
"############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 =",
"interest and apply classification Object = ssccoorriinngg(filename='', channel='', fs =",
"set X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels) # Test",
"= 'feat42_l&rh_N3&REM') #%% Load features from different brain regions, sleep",
"LEFT hemisphere - N3 X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3))",
"wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy']) dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape,",
"{round(r4*100, 2)}+- {round(std4*100, 2)}') #%% Applying Randomized grid search to",
"= 'C3-M2' Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs =",
"= (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4 = 'C4-M1' ch_C3 =",
"= 'O1-M2' Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs =",
"'feat42_lh_N3') # Both hemisphere Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 ,",
"= 'feat42_rh_REM') Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename",
"X_lh_REM)) # Save combination Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 ,",
"200, T = 30) X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3,",
"= X_N3 , y=y_lh_N3 , path = save_path, filename =",
"= ['log2', 'sqrt'], max_depth = [int(x) for x in np.arange(10,",
"= 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs =",
"results_LR['test_recall'].shape, data = results_LR['test_recall']) dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data",
"'C3-M2' Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200,",
"= 200, T = 30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X =",
"labels = 'labels' # Train set X_train, y_train = Object.LoadFeatureSet(path,",
"Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive ranks_rec, Feat_selected_rec =",
"= 'O2-M1' ch_O1 = 'O1-M2' Object_O1_REM = ML_Depression(filename=fname_O_REM, channel =",
"'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3') #%%",
"Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path, filename =",
"X_N3 , y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3')",
"y=y_C4_N3, path = save_path, filename = 'feat42_C4_N3') ########### Occipital electrodes",
"print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}') # xgb",
"= np.column_stack((X_rh_N3, X_lh_N3)) X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Save combination",
"'feat42_rh_N3') Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path = save_path,",
"apply classification Object = ssccoorriinngg(filename='', channel='', fs = 200, T",
"&REM combination Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path =",
"results_xgb[metric].std() print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}') #",
"labels) # Define the scoring criteria: scoring = {'accuracy' :",
"x in np.arange(10, 500, 20)], max_features = ['log2', 'sqrt'], max_depth",
"= Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) # Pick left hemisphere N3",
"filename = 'feat42_l&rh_REM') # Both hemispheres- SWS &REM combination Object.SaveFeatureSet(X",
"np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL COMBINATIONS Object = ML_Depression(filename='', channel='',",
"= save_path, filename = 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3",
"scoring, n_estimators = 1000, cv = 10 , max_depth=3, learning_rate=.1)",
"= np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL COMBINATIONS Object = ML_Depression(filename='',",
"save_path, filename = 'feat42_fp1_REM') Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2,",
"path = 'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train' feats =",
"results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}') #",
"Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM')",
"30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path =",
"30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path =",
"data = results_xgb['test_recall']) # f1-score dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data",
"'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path,",
"y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels) # Pick right hemisphere",
"(main_path+\"tr90_N3_O1-M2_O2-M1.h5\") fname_O_REM = (main_path+\"tr90_REM_O1-M2_O2-M1.h5\") ch_O2 = 'O2-M1' ch_O1 = 'O1-M2'",
"'feat42_lh_N3' X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels) # Pick",
"combine them Object = ML_Depression(filename='', channel='', fs = 200, T",
"data = results_LR['test_recall']) dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data =",
"'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' fname = '42feats_N3' with h5py.File((directory+fname + '.h5'), 'w') as",
"dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy']) dset =",
"# Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1) #ANOVA",
"Central electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'",
"TEST CLASSIFIERS WITH SELECTED FEATS results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring",
"results_LR['test_accuracy'].shape, data = results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data",
"fs = 200, T = 30) X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() Object_fp1_REM.SaveFeatureSet(X",
"ML_Depression(filename='', channel='', fs = 200, T = 30) # one",
"10 , max_depth=3, learning_rate=.1) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test,",
"print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}') # LR",
"= results_xgb['test_recall']) # f1-score dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data =",
"= wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall']) dset = wf.create_dataset('rec_LR' ,",
"= results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for xgb is: {round(r2*100,",
"channel: tic = time.time() ########### Central electrodes ############# main_path =",
"fs = 200, T = 30) X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X",
"= 500, cv = 10) Acc, Recall, prec, f1_sc =",
"X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path,",
"n_estimators = 200, cv = 10) #%% Example save featureset",
"= 'feat42_lh_REM' X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels) #",
"electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3",
"NOW TEST CLASSIFIERS WITH SELECTED FEATS results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y,",
"(main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\") ch_fp2 = 'fp2-M1' ch_fp1 = 'fp1-M2'",
"ch_fp2 = 'fp2-M1' ch_fp1 = 'fp1-M2' Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel",
"= X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM') Object_C4_REM",
"T = 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM,",
"########### Fp electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_fp_N3 =",
"sides - REM X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Both sides",
"= Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename",
"save_path, filename = 'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2,",
"channel = ch_C4, fs = 200, T = 30) X_C4_N3,y_C4_N3",
"{round(std4*100, 2)}') #%% Applying Randomized grid search to find the",
"= 200, cv = 10) #%% Example save featureset path",
"= Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename",
"= X_SWS_REM , y=y_SWS_REM , path = save_path, filename =",
"200, T = 30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM,",
"= 'feat42_Fp1-Fp2_train' feats = 'featureset' labels = 'labels' # Train",
"= 'feat42_lh_N3') # Both hemisphere Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3",
"and apply classification Object = ssccoorriinngg(filename='', channel='', fs = 200,",
"X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels) # Test set",
"channel = ch_C4, fs = 200, T = 30) X_C4_REM,y_C4_REM",
"fname = 'feat42_Fp1-Fp2_train' feats = 'featureset' labels = 'labels' #",
"wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall']) dset = wf.create_dataset('rec_RF' ,",
"np.column_stack((X_lh_REM,X_O1_REM)) # LEFT hemisphere - N3 X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3",
"sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score import h5py import",
"200, T = 30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3,",
"X_lh_N3)) X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Save combination Object.SaveFeatureSet(X =",
"Object.LoadFeatureSet(path, fname_rh_REM, feats, labels) # Pick LEFT hemisphere REM fname_lh_REM",
"estimator = RandomForestClassifier(), scoring = scoring, n_estimators = [int(x) for",
"= save_path, filename = 'feat42_fp2_N3') toc = time.time() print(f'time taken:",
"np.arange(10, 500, 20)], max_features = ['log2', 'sqrt'], max_depth = [int(x)",
"y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3,",
"= Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X,",
"results_RF['test_accuracy'].shape, data = results_RF['test_accuracy']) dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data =",
"= 200, T = 30) X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X =",
"np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere - REM X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM)) X_lh_REM",
"= [int(x) for x in np.arange(10, 100, 30)], min_samples_split =",
"2)}+- {round(std1*100, 2)}') # xgb r2 = results_xgb[metric].mean() std2 =",
"one hemisphere Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename",
"# Pick right hemisphere N3 fname_rh_N3 = 'feat42_rh_N3' X_rh_N3, y_rh_N3",
"= Object.LoadFeatureSet(path, fname, feats, labels) # Test set fname =",
"'tr90_fp1-M2_fp2-M1', saving = True, fname_save = 'tr90_N3&REM_fp1-M2') #%% How to",
"How to save some results? directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' fname =",
"= (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\") ch_fp2 = 'fp2-M1' ch_fp1 = 'fp1-M2' Object_fp1_REM =",
"200, T = 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3,",
"= 'feat42_fp1_REM') Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs =",
"= scoring, n_estimators = [int(x) for x in np.arange(10, 500,",
"y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels) # Combine them X_N3",
"= save_path, filename = 'feat42_l&rh_N3&REM') #%% Load features from different",
"fname_lh_N3, feats, labels) # Pick right hemisphere REM fname_rh_REM =",
"scoring = scoring, n_estimators = 500, cv = 10) Acc,",
"data = results_LR['test_precision']) dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data =",
"X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname = 'feat42_N3_fp2-M1', feats =",
"= Object.FeatSelect_LASSO(X, y, C = 1) #ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y,",
"y_pred_xgb) #%% Outcome measures # Defien required metrics here: Metrics",
"X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Save",
"stds, params= Object.RandomSearchRF(X, y, estimator = RandomForestClassifier(), scoring = scoring,",
"save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' feats = 'featureset' labels = 'labels' #",
"T = 30) path = 'C:/PhD/ML in depression/' fname =",
"n_estimators = [int(x) for x in np.arange(10, 500, 20)], max_features",
"= np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3)) # Both sides - REM",
"xgb r2 = results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for xgb",
"# Defien required metrics here: Metrics = ['test_accuracy', 'test_precision', 'test_recall',",
"results_RF['test_recall'].shape, data = results_RF['test_recall']) dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data =",
"= 20) #### NOW TEST CLASSIFIERS WITH SELECTED FEATS results_RF",
", results_RF['test_accuracy'].shape, data = results_RF['test_accuracy']) dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data",
"= X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3') ###########",
"Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20) #### NOW TEST",
"set fname = 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname, feats,",
"20)], max_features = ['log2', 'sqrt'], max_depth = [int(x) for x",
"save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_lh_REM ,",
", results_LR['test_precision'].shape, data = results_LR['test_precision']) dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape,",
"#%% Import libs import numpy as np from sklearn.ensemble import",
"right hemisphere REM fname_rh_REM = 'feat42_rh_REM' X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path,",
"data = results_SVM['test_precision']) dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data =",
"save_path, filename = 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 ,",
"= np.column_stack((X_lh_REM,X_O1_REM)) # LEFT hemisphere - N3 X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3))",
", results_LR['test_recall'].shape, data = results_LR['test_recall']) dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape,",
"= results_LR['test_f1_score']) dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score'])",
"= 200, T = 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X =",
"= 'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train' feats = 'featureset'",
"r4 = results_LR[metric].mean() std4 = results_LR[metric].std() print(f'{metric} for LR is:",
"y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3,",
"= 80) #Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k =",
"y_test = Object.LoadFeatureSet(path, fname, feats, labels) # Define the scoring",
"max_depth = 7) # Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C",
"= wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall']) # f1-score dset =",
"path = save_path, filename = 'feat42_O2_N3') ########### Fp electrodes #############",
"results_xgb['test_f1_score']) #%% Extracting features from more than one channel: tic",
"= Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename",
"Object_O1_REM.FeatureExtraction() Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename =",
"results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy']) dset",
"and SWS epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2', N3_fname",
"= np.column_stack((X_lh_N3,X_O1_N3)) # Both sides - REM X_REM = np.column_stack((X_rh_REM,",
"save_path, filename = 'feat42_l&rh_REM') # Both hemispheres- SWS &REM combination",
"scoring criteria: scoring = {'accuracy' : make_scorer(accuracy_score), 'precision' : make_scorer(precision_score),",
"X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM') Object_C3_N3 =",
"Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200, T",
"= ch_O2, fs = 200, T = 30) X_O2_REM,y_O2_REM =",
"= 'feat42_N3_fp2-M1', feats = 'featureset', labels = 'labels') #%% Combining",
"# Precision dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision']) dset",
"X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM') Object_fp2_REM =",
"min_samples_leaf = [1, 2, 4], bootstrap = [True, False], n_iter",
"= 30) X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path",
"x in np.arange(10, 100, 30)], min_samples_split = [2, 5, 10],",
"Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T",
"200, T = 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM,",
"100, 30)], min_samples_split = [2, 5, 10], min_samples_leaf = [1,",
"WITH SELECTED FEATS results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring,",
"feats = 'featureset' labels = 'labels' # Pick right hemisphere",
"with h5py.File((directory+fname + '.h5'), 'w') as wf: # Accuracies dset",
"= 30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path",
"prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome measures # Defien",
"Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost",
"fs = 200, T = 30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X",
"ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T = 30)",
"ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7) # Lasso",
"is: {round(r2*100, 2)}+- {round(std2*100, 2)}') # SVM r3 = results_SVM[metric].mean()",
"#%% Example load features: X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname",
"fs = 200, T = 30) path = 'C:/PhD/ML in",
"# xgb r2 = results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for",
"'feat42_N3_fp2-M1', feats = 'featureset', labels = 'labels') #%% Combining some",
"cv = 10) #%% Test feature selection methods ## #",
"X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction() Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path,",
"scoring, n_estimators = 200, cv = 10) #%% Example save",
"Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename =",
"= wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall']) dset = wf.create_dataset('rec_xgb',",
"= 'feat42_O1_N3') Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs =",
"Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3')",
"hemisphere N3 fname_lh_N3 = 'feat42_lh_N3' X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3,",
"depression/' fname = 'feat42_Fp1-Fp2_train' feats = 'featureset' labels = 'labels'",
"fs = 200, T = 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X",
"filename = 'feat42_N3') #%% Example load features: X, y= Object.LoadFeatureSet(path",
"Object.LoadFeatureSet(path, fname, feats, labels) # Test set fname = 'feat42_Fp1-Fp2_test'",
"wf: # Accuracies dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy'])",
"of interest and apply classification Object = ssccoorriinngg(filename='', channel='', fs",
"N3 fname_lh_N3 = 'feat42_lh_N3' X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats,",
"= '42feats_N3' with h5py.File((directory+fname + '.h5'), 'w') as wf: #",
"fs = 200, T = 30) # one hemisphere Object.SaveFeatureSet(X",
"fname_O_N3 = (main_path+\"tr90_N3_O1-M2_O2-M1.h5\") fname_O_REM = (main_path+\"tr90_REM_O1-M2_O2-M1.h5\") ch_O2 = 'O2-M1' ch_O1",
"Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3')",
"fname_save = 'tr90_N3&REM_fp1-M2') #%% How to save some results? directory",
"Applying Randomized grid search to find the best config. of",
"X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path = save_path,",
"filename = 'feat42_O2_REM') Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs",
"save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 ,",
"# Cross-validation using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring,",
"'feat42_Fp1-Fp2_train' feats = 'featureset' labels = 'labels' # Train set",
"std2 = results_xgb[metric].std() print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100,",
"# Pick left hemisphere N3 fname_lh_N3 = 'feat42_lh_N3' X_lh_N3, y_lh_N3",
"[int(x) for x in np.arange(10, 500, 20)], max_features = ['log2',",
"results_RF['test_recall']) dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall']) # f1-score",
"REM X_SWS_REM = np.row_stack((X_N3, X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM)) #",
"data = results_LR['test_f1_score']) dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data =",
"# Cross-validation using logistic Random Forests y_pred_RF = Object.RandomForest_Modelling(X_train, y_train,",
"X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels) # Pick LEFT",
"dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score']) dset =",
"metrics here: Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for metric",
"fname, feats, labels) # Test set fname = 'feat42_Fp1-Fp2_test' X_test,",
"= 30) X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path",
"= ch_fp2, fs = 200, T = 30) X_fp2_REM,y_fp2_REM =",
"= 'feat42_rh_N3' X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) #",
"right hemisphere N3 fname_rh_N3 = 'feat42_rh_N3' X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path,",
"= ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T =",
"Object.FeatSelect_LASSO(X, y, C = 1) #ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k",
"ML_Depression(filename=fname_C_REM, channel = ch_C3, fs = 200, T = 30)",
"channel='', fs = 200, T = 30) # one hemisphere",
"20) #### NOW TEST CLASSIFIERS WITH SELECTED FEATS results_RF =",
"SELECTED FEATS results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators",
"X_lh_REM)) # Both sides - N3 X_N3 = np.column_stack((X_rh_N3, X_lh_N3))",
"# Accuracies dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy']) dset",
"import h5py import time from ssccoorriinngg import ssccoorriinngg import numpy",
"= 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path",
"= Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename",
"them X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) X_REM = np.column_stack((X_rh_REM, X_lh_REM)) #",
"main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\")",
"'feat42_l&rh_N3&REM') #%% Load features from different brain regions, sleep stage",
"feats, labels) # Define the scoring criteria: scoring = {'accuracy'",
"dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy']) dset =",
"for x in np.arange(10, 100, 30)], min_samples_split = [2, 5,",
"results_RF[metric].mean() std1 = results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100, 2)}+-",
"import make_scorer, accuracy_score, precision_score, recall_score, f1_score import h5py import time",
"ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200, T = 30)",
"= Object.LoadFeatureSet(path, fname_lh_REM, feats, labels) # Combine them X_N3 =",
"and REM X_SWS_REM = np.row_stack((X_N3, X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM))",
"ch_C3, fs = 200, T = 30) X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction()",
"stage and combine them Object = ML_Depression(filename='', channel='', fs =",
"wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision']) # Recall dset = wf.create_dataset('rec_SVM',",
"fname = 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels)",
"X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path,",
"Outcome measures # Defien required metrics here: Metrics = ['test_accuracy',",
"wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision']) dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape,",
"ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T = 30)",
"tic}') ########## Concatenate all features ######### # RIGHT hemisphere -",
"CLASSIFIERS WITH SELECTED FEATS results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring =",
"xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}') # SVM r3 =",
"= save_path, filename = 'feat42_rh_REM') Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path",
"results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score']) dset",
"results_LR['test_precision'].shape, data = results_LR['test_precision']) dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data",
"# Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7)",
"- N3 X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) # Combine SWS and",
"+ '.h5'), 'w') as wf: # Accuracies dset = wf.create_dataset('acc_SVM',",
"fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4 = 'C4-M1' ch_C3",
"r2 = results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for xgb is:",
"= ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200, T =",
"= RandomForestClassifier(), scoring = scoring, n_estimators = [int(x) for x",
"y=y_lh_N3 , path = save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X =",
"feats, labels) # Test set fname = 'feat42_Fp1-Fp2_test' X_test, y_test",
"cv = 10) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF)",
"in np.arange(10, 100, 30)], min_samples_split = [2, 5, 10], min_samples_leaf",
"y_fp2_REM)) # SAVE ALL COMBINATIONS Object = ML_Depression(filename='', channel='', fs",
"= 30) X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path",
"Object.LoadFeatureSet(path, fname, feats, labels) # Define the scoring criteria: scoring",
"########### Central electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path =",
"= ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T =",
"Object = ML_Depression(filename='', channel='', fs = 200, T = 30)",
"make_scorer(precision_score), 'recall' : make_scorer(recall_score), 'f1_score' : make_scorer(f1_score)} # Cross-validation using",
"= 'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs =",
"X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM') Object.SaveFeatureSet(X =",
"= 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1', saving =",
"selection methods ## # PCA PCA_out = Object.FeatSelect_PCA(X, y, n_components",
"y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM,",
"= ch_C4, fs = 200, T = 30) X_C4_N3,y_C4_N3 =",
"y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname = 'feat42_N3_fp2-M1', feats = 'featureset',",
"= np.column_stack((X_rh_N3, X_lh_N3)) # Combine SWS and REM X_SWS_REM =",
"= 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4 =",
"y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL COMBINATIONS Object =",
"path = save_path, filename = 'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel",
"fs = 200, T = 30) X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X",
"= scoring, n_estimators = 200, cv = 10) #%% Example",
"artefact/train_test/\" fname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\") ch_fp2 = 'fp2-M1'",
"filename = 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs",
"REM fname_lh_REM = 'feat42_lh_REM' X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats,",
"for x in np.arange(10, 500, 20)], max_features = ['log2', 'sqrt'],",
"- N3 X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3)) # Both",
"y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM') Object_C4_REM = ML_Depression(filename=fname_C_REM,",
"#RF r1 = results_RF[metric].mean() std1 = results_RF[metric].std() print(f'{metric} for RF",
"X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Save combination Object.SaveFeatureSet(X = X_N3",
"= ch_fp2, fs = 200, T = 30) X_fp2_N3,y_fp2_N3 =",
"= wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data = results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF'",
"results_LR[metric].std() print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}') #%%",
"#%% Combining some REM and SWS epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/',",
"X_REM , y=y_lh_REM , path = save_path, filename = 'feat42_l&rh_REM')",
"= 10) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) #",
"X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) # Combine SWS and REM X_SWS_REM",
"ML_Depression(filename=fname_O_N3, channel = ch_O2, fs = 200, T = 30)",
"= 'feat42_N3') #%% Example load features: X, y= Object.LoadFeatureSet(path =",
"Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7) #",
"#%% Extracting features from more than one channel: tic =",
"X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL COMBINATIONS Object",
"X_lh_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3')",
"X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path,",
"accuracy_score, precision_score, recall_score, f1_score import h5py import time from ssccoorriinngg",
"= np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere - REM",
"= results_RF[metric].mean() std1 = results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100,",
"features from more than one channel: tic = time.time() ###########",
"dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy']) # Precision dset",
"X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere - REM X_lh_REM =",
"= 10) #%% Example save featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X,",
"7) # Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1)",
"cross_validate from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score import",
"path = save_path, filename = 'feat42_lh_N3') # Both hemisphere Object.SaveFeatureSet(X",
"= (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\") ch_fp2 = 'fp2-M1' ch_fp1 =",
"from sklearn.model_selection import cross_validate #%% Picking featureset of interest and",
"path = save_path, filename = 'feat42_l&rh_N3&REM') #%% Load features from",
"hemisphere Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename =",
"5, 10], min_samples_leaf = [1, 2, 4], bootstrap = [True,",
"ch_fp2, fs = 200, T = 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction()",
"featureset of interest and apply classification Object = ssccoorriinngg(filename='', channel='',",
"fname, feats, labels) # Define the scoring criteria: scoring =",
"= X_O2_REM, y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM') Object_O1_N3",
"electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_O_N3 = (main_path+\"tr90_N3_O1-M2_O2-M1.h5\") fname_O_REM",
"= [2, 5, 10], min_samples_leaf = [1, 2, 4], bootstrap",
"fname_lh_REM, feats, labels) # Combine them X_N3 = np.column_stack((X_rh_N3, X_lh_N3))",
"'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname = 'feat42_N3_fp2-M1', feats = 'featureset', labels = 'labels')",
"Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename =",
"Both sides - REM X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Both",
"results_SVM[metric].std() print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}') #",
"path = save_path, filename = 'feat42_l&rh_REM') # Both hemispheres- SWS",
"RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}') # xgb r2 =",
"'test_f1_score'] for metric in Metrics: #RF r1 = results_RF[metric].mean() std1",
", y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM') #%%",
"= 'feat42_fp1_N3') Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs =",
"= save_path, filename = 'feat42_fp1_N3') Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel =",
"= results_RF['test_recall']) dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall']) #",
"n_components = 5) # Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y,",
"= 'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path =",
"make_scorer(accuracy_score), 'precision' : make_scorer(precision_score), 'recall' : make_scorer(recall_score), 'f1_score' : make_scorer(f1_score)}",
"Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path, filename",
"print(f'{metric} for LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}') #%% Applying",
"y=y_SWS_REM , path = save_path, filename = 'feat42_l&rh_N3&REM') #%% Load",
"[1, 2, 4], bootstrap = [True, False], n_iter = 100,",
"features ######### # RIGHT hemisphere - REM X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM))",
"fname_rh_N3 = 'feat42_rh_N3' X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels)",
"path = save_path, filename = 'feat42_fp2_N3') toc = time.time() print(f'time",
"80) #Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20)",
"import time from ssccoorriinngg import ssccoorriinngg import numpy as np",
"ALL COMBINATIONS Object = ML_Depression(filename='', channel='', fs = 200, T",
"methods ## # PCA PCA_out = Object.FeatSelect_PCA(X, y, n_components =",
"path, filename = 'feat42_N3') #%% Example load features: X, y=",
"= wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' ,",
"= X_REM , y=y_fp2_N3 , path = save_path, filename =",
"fs = 200, T = 30) X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X",
"'labels') #%% Combining some REM and SWS epochs Object.CombineEpochs(directory =",
"main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\")",
"= Object.LoadFeatureSet(path, fname, feats, labels) # Define the scoring criteria:",
"= [True, False], n_iter = 100, cv = 10) #%%",
"y_train,X_test, y_test, scoring, n_estimators = 1000, cv = 10 ,",
"= 'feat42_fp2_N3') toc = time.time() print(f'time taken: {toc - tic}')",
"LEFT hemisphere REM fname_lh_REM = 'feat42_lh_REM' X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path,",
"'42feats_N3' with h5py.File((directory+fname + '.h5'), 'w') as wf: # Accuracies",
"for SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}') # LR r4",
"Define the scoring criteria: scoring = {'accuracy' : make_scorer(accuracy_score), 'precision'",
"featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path = path, filename",
"\"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\") ch_fp2 =",
"= X_REM , y=y_lh_REM , path = save_path, filename =",
"= wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision']) dset = wf.create_dataset('prec_RF'",
"results_SVM['test_recall'].shape, data = results_SVM['test_recall']) dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data",
"= save_path, filename = 'feat42_C3_REM') Object_C4_REM = ML_Depression(filename=fname_C_REM, channel =",
"# LR r4 = results_LR[metric].mean() std4 = results_LR[metric].std() print(f'{metric} for",
"Pick right hemisphere N3 fname_rh_N3 = 'feat42_rh_N3' X_rh_N3, y_rh_N3 =",
"'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200,",
"to save some results? directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' fname = '42feats_N3'",
"Both hemispheres- SWS &REM combination Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM",
"hemisphere - REM X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM)) X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM)) #",
"SWS epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2', N3_fname =",
"results_SVM['test_recall']) dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall']) dset",
"path = save_path, filename = 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel",
"Precision dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision']) dset =",
"# Define the scoring criteria: scoring = {'accuracy' : make_scorer(accuracy_score),",
"RIGHT hemisphere - REM X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM)) X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM))",
"Pick right hemisphere REM fname_rh_REM = 'feat42_rh_REM' X_rh_REM, y_rh_REM =",
"1) #ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive ranks_rec,",
"dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision']) dset =",
"path = save_path, filename = 'feat42_O1_N3') Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel",
"= 30) # one hemisphere Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path",
"#ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive ranks_rec, Feat_selected_rec",
"path = save_path, filename = 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 ,",
"= X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM') Object_C3_N3",
"# SAVE ALL COMBINATIONS Object = ML_Depression(filename='', channel='', fs =",
"= 200, T = 30) path = 'C:/PhD/ML in depression/'",
"SWS &REM combination Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path",
"results_RF = Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200,",
"Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM, path = save_path, filename = 'feat42_C3_REM')",
"= save_path, filename = 'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM, channel =",
"Occipital electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_O_N3 = (main_path+\"tr90_N3_O1-M2_O2-M1.h5\")",
"= 30) X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path",
"200, T = 30) path = 'C:/PhD/ML in depression/' fname",
"y=y_O2_REM, path = save_path, filename = 'feat42_O2_REM') Object_O1_N3 = ML_Depression(filename=fname_O_N3,",
"left hemisphere N3 fname_lh_N3 = 'feat42_lh_N3' X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path,",
"\"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM =",
"y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM') # Both",
"fs = 200, T = 30) path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' save_path",
"brain regions, sleep stage and combine them Object = ML_Depression(filename='',",
"X_N3 , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3')",
"= 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels) #",
"channel = ch_fp1, fs = 200, T = 30) X_fp1_N3,y_fp1_N3",
"std3 = results_SVM[metric].std() print(f'{metric} for SVM is: {round(r3*100, 2)}+- {round(std3*100,",
"X_O1_REM, y=y_O1_REM, path = save_path, filename = 'feat42_O1_REM') Object_O2_REM =",
"= Object.LoadFeatureSet(path, fname_lh_N3, feats, labels) # Pick right hemisphere REM",
"<filename>Examples/ExampleCodes_ssccoorriinngg.py #%% Import libs import numpy as np from sklearn.ensemble",
"'w') as wf: # Accuracies dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data",
"'.h5'), 'w') as wf: # Accuracies dset = wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape,",
"\"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_O_N3 = (main_path+\"tr90_N3_O1-M2_O2-M1.h5\") fname_O_REM = (main_path+\"tr90_REM_O1-M2_O2-M1.h5\") ch_O2 =",
"= save_path, filename = 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel =",
"for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}') # SVM r3",
"[True, False], n_iter = 100, cv = 10) #%% Test",
"X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) # Pick left",
"data = results_SVM['test_recall']) dset = wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data =",
"= ch_fp1, fs = 200, T = 30) X_fp1_REM,y_fp1_REM =",
"wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy']) # Precision dset = wf.create_dataset('prec_SVM',",
"sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_validate from sklearn.metrics import",
"= wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy']) # Precision dset =",
"= np.row_stack((X_N3, X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL",
"'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path = save_path,",
"ch_C4, fs = 200, T = 30) X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction()",
"N3_fname = 'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1', saving = True, fname_save",
"std1 = results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100,",
"np.column_stack((X_rh_N3, X_lh_N3)) # Combine SWS and REM X_SWS_REM = np.row_stack((X_N3,",
"'sqrt'], max_depth = [int(x) for x in np.arange(10, 100, 30)],",
"= 'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1', saving = True, fname_save =",
"Fp electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\")",
": make_scorer(f1_score)} # Cross-validation using logistic Random Forests y_pred_RF =",
"'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' feats = 'featureset' labels = 'labels'",
"# SVM r3 = results_SVM[metric].mean() std3 = results_SVM[metric].std() print(f'{metric} for",
"30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path =",
"= ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T =",
"T = 30) X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM, y=y_C3_REM,",
"= results_RF[metric].std() print(f'{metric} for RF is: {round(r1*100, 2)}+- {round(std1*100, 2)}')",
"'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' feats = 'featureset' labels = 'labels' # Pick right",
"#%% Example save featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path",
"= [int(x) for x in np.arange(10, 500, 20)], max_features =",
"'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1, fs = 200,",
"make_scorer(recall_score), 'f1_score' : make_scorer(f1_score)} # Cross-validation using logistic Random Forests",
"'labels' # Train set X_train, y_train = Object.LoadFeatureSet(path, fname, feats,",
"f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost y_pred_xgb =",
"import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection",
"bootstrap = [True, False], n_iter = 100, cv = 10)",
"from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_validate from sklearn.metrics",
"#%% How to save some results? directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/results/' fname",
"Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using",
"= 'featureset', labels = 'labels') #%% Combining some REM and",
"[2, 5, 10], min_samples_leaf = [1, 2, 4], bootstrap =",
"path = save_path, filename = 'feat42_C3_REM') Object_C4_REM = ML_Depression(filename=fname_C_REM, channel",
", path = save_path, filename = 'feat42_l&rh_N3&REM') #%% Load features",
"Object.FeatSelect_Recrusive(X, y, k = 20) #### NOW TEST CLASSIFIERS WITH",
"from more than one channel: tic = time.time() ########### Central",
"X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels) # Combine them",
"Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y,",
"ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200, T = 30)",
"fs = 200, T = 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X",
"= \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM",
"= 'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs =",
"libs import numpy as np from sklearn.ensemble import RandomForestClassifier from",
"= Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename",
"results_RF['test_f1_score'].shape, data = results_RF['test_f1_score']) dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data =",
"= save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_lh_REM",
"= ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T =",
"= 'labels' # Pick right hemisphere N3 fname_rh_N3 = 'feat42_rh_N3'",
"= 200, T = 30) X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X =",
"30) X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction() Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path =",
"Object.FeatSelect_Boruta(X, y, max_depth = 7) # Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X,",
"np.column_stack((X_rh_N3, X_lh_N3)) X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Save combination Object.SaveFeatureSet(X",
"= ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T =",
"# Test set fname = 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path,",
"filename = 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path",
"one channel: tic = time.time() ########### Central electrodes ############# main_path",
"time.time() ########### Central electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" save_path",
"dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score']) dset =",
"in np.arange(10, 500, 20)], max_features = ['log2', 'sqrt'], max_depth =",
", y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM') #",
"labels) # Pick right hemisphere REM fname_rh_REM = 'feat42_rh_REM' X_rh_REM,",
"#%% Applying Randomized grid search to find the best config.",
"filename = 'feat42_fp1_REM') Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs",
"30) X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path =",
", y=y_fp2_N3 , path = save_path, filename = 'feat42_rh_N3') Object.SaveFeatureSet(X",
"= 200, T = 30) X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X =",
"ch_C4, fs = 200, T = 30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction()",
"ch_O1, fs = 200, T = 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction()",
"SVM r3 = results_SVM[metric].mean() std3 = results_SVM[metric].std() print(f'{metric} for SVM",
"Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path, filename =",
"XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000,",
"results_RF['test_f1_score']) dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score']) #%% Extracting",
"classification Object = ssccoorriinngg(filename='', channel='', fs = 200, T =",
"REM and SWS epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2',",
"'feat42_rh_REM') Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path, filename =",
"Object_O1_REM = ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T",
"= 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1', REM_fname =",
"Object_fp2_REM.SaveFeatureSet(X = X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM')",
"time from ssccoorriinngg import ssccoorriinngg import numpy as np from",
"= wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision']) dset = wf.create_dataset('prec_LR' ,",
"config. of RF BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y,",
"= 30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path",
", results_LR['test_accuracy'].shape, data = results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape,",
"X_REM , y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_REM')",
"max_depth=3, learning_rate=.1) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%%",
"dset = wf.create_dataset('rec_xgb', results_xgb['test_recall'].shape, data = results_xgb['test_recall']) # f1-score dset",
"np.column_stack((X_fp2_REM,X_C4_REM)) X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM)) # RIGHT hemisphere - N3 X_rh_N3",
"X_test, y_test, scoring = scoring, n_estimators = 500, cv =",
"'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1', saving = True, fname_save = 'tr90_N3&REM_fp1-M2')",
"= results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score'])",
"= Object_O1_REM.FeatureExtraction() Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path = save_path, filename",
"'feat42_fp1_REM') Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200,",
"fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4 = 'C4-M1' ch_C3 = 'C3-M2' Object_C3_REM",
"= results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data = results_RF['test_accuracy'])",
"ch_fp1, fs = 200, T = 30) X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction()",
"= 10) #%% Test feature selection methods ## # PCA",
"Feat_selected_lasso = Object.FeatSelect_LASSO(X, y, C = 1) #ANOVA Feat_selected_ANOVA =",
"= Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome measures # Defien required metrics",
"'C4-M1' ch_C3 = 'C3-M2' Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3,",
"filename = 'feat42_rh_REM') Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM, path = save_path,",
"artefact/train_test/\" save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\")",
"= wf.create_dataset('rec_LR' , results_LR['test_recall'].shape, data = results_LR['test_recall']) dset = wf.create_dataset('rec_RF'",
"fs = 200, T = 30) X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X",
"measures # Defien required metrics here: Metrics = ['test_accuracy', 'test_precision',",
"= 'labels') #%% Combining some REM and SWS epochs Object.CombineEpochs(directory",
"= 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3 , y=y_fp2_N3 , path =",
"= X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3') Object_C4_N3",
", results_RF['test_precision'].shape, data = results_RF['test_precision']) dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data",
"save_path, filename = 'feat42_fp2_N3') toc = time.time() print(f'time taken: {toc",
"Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring = scoring, n_estimators = 500,",
"channel = ch_O1, fs = 200, T = 30) X_O1_N3,y_O1_N3",
"X_lh_N3)) # Combine SWS and REM X_SWS_REM = np.row_stack((X_N3, X_REM))",
"N3 X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere",
"'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM, channel = ch_O2, fs = 200,",
"= np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere - REM X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM))",
"Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename =",
"########## Concatenate all features ######### # RIGHT hemisphere - REM",
"30) path = 'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train' feats",
"Object_C4_REM.SaveFeatureSet(X = X_C4_REM, y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM')",
"= results_SVM['test_precision']) dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision'])",
"feats, labels) # Pick LEFT hemisphere REM fname_lh_REM = 'feat42_lh_REM'",
"= Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path, filename",
"#Recruisive ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20) ####",
"save featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path = path,",
"saving = True, fname_save = 'tr90_N3&REM_fp1-M2') #%% How to save",
"T = 30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3,",
"y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3') Object_O2_N3 = ML_Depression(filename=fname_O_N3,",
"= ch_C4, fs = 200, T = 30) X_C4_REM,y_C4_REM =",
"= Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train,",
"load features: X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname = 'feat42_N3_fp2-M1',",
"30) X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path =",
", path = save_path, filename = 'feat42_l&rh_REM') # Both hemispheres-",
"f1_score import h5py import time from ssccoorriinngg import ssccoorriinngg import",
"'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1',",
"= save_path, filename = 'feat42_C4_N3') ########### Occipital electrodes ############# main_path",
"= 30) path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' feats =",
"LR r4 = results_LR[metric].mean() std4 = results_LR[metric].std() print(f'{metric} for LR",
"= 100, cv = 10) #%% Test feature selection methods",
"= 5) # Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth",
"prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost y_pred_xgb",
"2, 4], bootstrap = [True, False], n_iter = 100, cv",
"Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000, cv = 10",
"= ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T =",
"save_path, filename = 'feat42_lh_N3') # Both hemisphere Object.SaveFeatureSet(X = X_N3",
"k = 20) #### NOW TEST CLASSIFIERS WITH SELECTED FEATS",
"Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) # Pick left hemisphere N3 fname_lh_N3",
"## # PCA PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5)",
"Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM')",
"{toc - tic}') ########## Concatenate all features ######### # RIGHT",
"using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators =",
"'feat42_lh_REM' X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM, feats, labels) # Combine",
"= Object.FeatSelect_Recrusive(X, y, k = 20) #### NOW TEST CLASSIFIERS",
"X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3)) # Both sides -",
"= results_xgb['test_accuracy']) # Precision dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data =",
"results_RF['test_accuracy']) dset = wf.create_dataset('acc_xgb', results_xgb['test_accuracy'].shape, data = results_xgb['test_accuracy']) # Precision",
"Test set fname = 'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname,",
"Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T",
"'recall' : make_scorer(recall_score), 'f1_score' : make_scorer(f1_score)} # Cross-validation using logistic",
"X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path,",
"the scoring criteria: scoring = {'accuracy' : make_scorer(accuracy_score), 'precision' :",
"more than one channel: tic = time.time() ########### Central electrodes",
"= time.time() ########### Central electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\"",
"filename = 'feat42_lh_N3') # Both hemisphere Object.SaveFeatureSet(X = X_N3 ,",
"#%% Test feature selection methods ## # PCA PCA_out =",
"Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_rh_REM')",
"30) X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path =",
"h5py.File((directory+fname + '.h5'), 'w') as wf: # Accuracies dset =",
"y_pred_RF) # Cross-validation using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test, y_test,",
"results_xgb['test_precision'].shape, data = results_xgb['test_precision']) # Recall dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape,",
"30) X_C4_N3,y_C4_N3 = Object_C4_N3.FeatureExtraction() Object_C4_N3.SaveFeatureSet(X = X_C4_N3, y=y_C4_N3, path =",
"= 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path",
"= results_xgb['test_precision']) # Recall dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data =",
"= time.time() print(f'time taken: {toc - tic}') ########## Concatenate all",
"y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3') # Both",
"feats, labels) # Combine them X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) X_REM",
"results_xgb['test_accuracy']) # Precision dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data = results_SVM['test_precision'])",
"T = 30) X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3,",
"= results_LR['test_precision']) dset = wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision'])",
"ssccoorriinngg import numpy as np from sklearn.model_selection import cross_validate #%%",
"dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR'",
"'featureset', labels = 'labels') #%% Combining some REM and SWS",
"X_C3_N3, y=y_C3_N3, path = save_path, filename = 'feat42_C3_N3') Object_C4_N3 =",
"from ssccoorriinngg import ssccoorriinngg import numpy as np from sklearn.model_selection",
"- N3 X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) # LEFT",
"= 30) path = 'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train'",
"Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path = save_path, filename",
"save_path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' fname_C_N3 = (main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4",
"= Object.FeatSelect_PCA(X, y, n_components = 5) # Boruta ranks_Boruta, Feat_selected_Boruta",
"BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params= Object.RandomSearchRF(X, y, estimator = RandomForestClassifier(),",
"Object.SaveFeatureSet(X = X_REM , y=y_lh_REM , path = save_path, filename",
"= Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3, path = save_path, filename",
"wf.create_dataset('acc_SVM', results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy']) dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape,",
"X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path,",
"= 10 , max_depth=3, learning_rate=.1) Acc, Recall, prec, f1_sc =",
"= 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path = path, filename = 'feat42_N3')",
"# Train set X_train, y_train = Object.LoadFeatureSet(path, fname, feats, labels)",
"######### # RIGHT hemisphere - REM X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM)) X_rh_REM",
"y=y_C4_REM, path = save_path, filename = 'feat42_C4_REM') Object_C3_N3 = ML_Depression(filename=fname_C_N3,",
"labels) # Pick left hemisphere N3 fname_lh_N3 = 'feat42_lh_N3' X_lh_N3,",
"= 'fp2-M1' ch_fp1 = 'fp1-M2' Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel =",
"labels = 'labels') #%% Combining some REM and SWS epochs",
"['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for metric in Metrics: #RF r1",
"y_train, X_test, y_test, scoring = scoring, n_estimators = 500, cv",
"Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename =",
"= 'labels' # Train set X_train, y_train = Object.LoadFeatureSet(path, fname,",
"Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path = save_path, filename",
"= 200, T = 30) X_fp2_REM,y_fp2_REM = Object_fp2_REM.FeatureExtraction() Object_fp2_REM.SaveFeatureSet(X =",
"[int(x) for x in np.arange(10, 100, 30)], min_samples_split = [2,",
"required metrics here: Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for",
"filename = 'feat42_l&rh_N3&REM') #%% Load features from different brain regions,",
"Import libs import numpy as np from sklearn.ensemble import RandomForestClassifier",
"fname_rh_N3, feats, labels) # Pick left hemisphere N3 fname_lh_N3 =",
"= ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200, T =",
"fs = 200, T = 30) X_O2_N3,y_O2_N3 = Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X",
"filename = 'feat42_C3_REM') Object_C4_REM = ML_Depression(filename=fname_C_REM, channel = ch_C4, fs",
"= \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM = (main_path+\"tr90_REM_fp1-M2_fp2-M1.h5\") ch_fp2",
"REM fname_rh_REM = 'feat42_rh_REM' X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats,",
"np.column_stack((X_rh_REM,X_O2_REM)) # RIGHT hemisphere - N3 X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3",
"dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall']) dset = wf.create_dataset('rec_LR'",
"np from sklearn.model_selection import cross_validate #%% Picking featureset of interest",
"Example load features: X, y= Object.LoadFeatureSet(path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname =",
"recall_score, f1_score import h5py import time from ssccoorriinngg import ssccoorriinngg",
"200, cv = 10) #%% Example save featureset path =",
"REM_fname = 'tr90_fp1-M2_fp2-M1', saving = True, fname_save = 'tr90_N3&REM_fp1-M2') #%%",
"electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_fp_N3 = (main_path+\"tr90_N3_fp1-M2_fp2-M1.h5\") fname_fp_REM",
"channel='', fs = 200, T = 30) path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'",
"= 200, T = 30) X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X =",
"different brain regions, sleep stage and combine them Object =",
"y, estimator = RandomForestClassifier(), scoring = scoring, n_estimators = [int(x)",
"= np.column_stack((X_fp1_REM,X_C3_REM)) X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM)) # LEFT hemisphere - N3",
"= X_fp1_N3, y=y_fp1_N3, path = save_path, filename = 'feat42_fp1_N3') Object_fp2_N3",
"# Pick right hemisphere REM fname_rh_REM = 'feat42_rh_REM' X_rh_REM, y_rh_REM",
"np.column_stack((X_rh_REM, X_lh_REM)) # Both sides - N3 X_N3 = np.column_stack((X_rh_N3,",
"'feat42_fp2_N3') toc = time.time() print(f'time taken: {toc - tic}') ##########",
"'feat42_fp1_N3') Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs = 200,",
"# LEFT hemisphere - N3 X_lh_N3 = np.column_stack((X_fp1_N3,X_C3_N3)) X_lh_N3 =",
"save_path, filename = 'feat42_fp1_N3') Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2,",
"filename = 'feat42_rh_N3') Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 , path",
", y=y_fp2_N3 , path = save_path, filename = 'feat42_lh_N3') #",
"y=y_fp2_N3 , path = save_path, filename = 'feat42_l&rh_N3') Object.SaveFeatureSet(X =",
"500, cv = 10) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test,",
"labels = 'labels' # Pick right hemisphere N3 fname_rh_N3 =",
"{round(r3*100, 2)}+- {round(std3*100, 2)}') # LR r4 = results_LR[metric].mean() std4",
"in depression/' fname = 'feat42_Fp1-Fp2_train' feats = 'featureset' labels =",
"Object_O2_N3.FeatureExtraction() Object_O2_N3.SaveFeatureSet(X = X_O2_N3, y=y_O2_N3, path = save_path, filename =",
"Defien required metrics here: Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score']",
"(main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4 = 'C4-M1' ch_C3 = 'C3-M2' Object_C3_REM = ML_Depression(filename=fname_C_REM,",
"ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T = 30)",
"hemisphere - N3 X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) #",
"True, fname_save = 'tr90_N3&REM_fp1-M2') #%% How to save some results?",
"here: Metrics = ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for metric in",
"SVM is: {round(r3*100, 2)}+- {round(std3*100, 2)}') # LR r4 =",
"ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T = 30)",
"'feat42_rh_N3' X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) # Pick",
"channel = ch_fp1, fs = 200, T = 30) X_fp1_REM,y_fp1_REM",
"fs = 200, T = 30) X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X",
"print(f'{metric} for xgb is: {round(r2*100, 2)}+- {round(std2*100, 2)}') # SVM",
"cross_validate #%% Picking featureset of interest and apply classification Object",
"200, T = 30) # one hemisphere Object.SaveFeatureSet(X = X_rh_REM,",
"Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3, path = save_path, filename =",
"results_LR['test_recall']) dset = wf.create_dataset('rec_RF' , results_RF['test_recall'].shape, data = results_RF['test_recall']) dset",
"30)], min_samples_split = [2, 5, 10], min_samples_leaf = [1, 2,",
"wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score']) dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape,",
"results_SVM['test_accuracy'].shape, data = results_SVM['test_accuracy']) dset = wf.create_dataset('acc_LR' , results_LR['test_accuracy'].shape, data",
"= ch_fp1, fs = 200, T = 30) X_fp1_N3,y_fp1_N3 =",
"filename = 'feat42_O1_N3') Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O2, fs",
"is: {round(r3*100, 2)}+- {round(std3*100, 2)}') # LR r4 = results_LR[metric].mean()",
"sklearn.model_selection import cross_validate from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score,",
"y=y_O2_N3, path = save_path, filename = 'feat42_O2_N3') ########### Fp electrodes",
"metric in Metrics: #RF r1 = results_RF[metric].mean() std1 = results_RF[metric].std()",
"= ML_Depression(filename='', channel='', fs = 200, T = 30) path",
"'feat42_C4_REM') Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs = 200,",
"results_SVM['test_precision']) dset = wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision']) dset",
"= 'feat42_O2_N3') ########### Fp electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\"",
"2)}+- {round(std4*100, 2)}') #%% Applying Randomized grid search to find",
"T = 30) X_C3_N3,y_C3_N3 = Object_C3_N3.FeatureExtraction() Object_C3_N3.SaveFeatureSet(X = X_C3_N3, y=y_C3_N3,",
"= ML_Depression(filename='', channel='', fs = 200, T = 30) #",
"= X_rh_N3 , y=y_fp2_N3 , path = save_path, filename =",
"T = 30) X_fp2_N3,y_fp2_N3 = Object_fp2_N3.FeatureExtraction() Object_fp2_N3.SaveFeatureSet(X = X_fp2_N3, y=y_fp2_N3,",
"filename = 'feat42_C4_N3') ########### Occipital electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without",
"= (main_path+\"tr90_REM_O1-M2_O2-M1.h5\") ch_O2 = 'O2-M1' ch_O1 = 'O1-M2' Object_O1_REM =",
"= Object.FeatSelect_Boruta(X, y, max_depth = 7) # Lasso Feat_selected_lasso =",
"Object.multi_label_confusion_matrix(y_test, y_pred_RF) # Cross-validation using XGBoost y_pred_xgb = Object.XGB_Modelling(X_train, y_train,X_test,",
"# Save combination Object.SaveFeatureSet(X = X_N3 , y=y_lh_N3 , path",
"= X_O1_N3, y=y_O1_N3, path = save_path, filename = 'feat42_O1_N3') Object_O2_N3",
"dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision']) # Recall dset",
"= ch_O1, fs = 200, T = 30) X_O1_N3,y_O1_N3 =",
"#%% Load features from different brain regions, sleep stage and",
"= save_path, filename = 'feat42_O1_N3') Object_O2_N3 = ML_Depression(filename=fname_O_N3, channel =",
"data = results_xgb['test_accuracy']) # Precision dset = wf.create_dataset('prec_SVM', results_SVM['test_precision'].shape, data",
"fname_rh_REM, feats, labels) # Pick LEFT hemisphere REM fname_lh_REM =",
"# Combine SWS and REM X_SWS_REM = np.row_stack((X_N3, X_REM)) y_SWS_REM",
"# PCA PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5) #",
"Random Forests y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test, scoring =",
"= wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision']) dset = wf.create_dataset('prec_xgb',",
"results_xgb['test_precision']) # Recall dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall'])",
"= Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X = X_O2_REM, y=y_O2_REM, path = save_path, filename",
"= 'featureset' labels = 'labels' # Pick right hemisphere N3",
"= ch_C3, fs = 200, T = 30) X_C3_REM,y_C3_REM =",
"Example save featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/' Object.SaveFeatureSet(X, y, path =",
"= 'fp1-M2' Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs =",
"feats = 'featureset', labels = 'labels') #%% Combining some REM",
"X_fp2_REM, y=y_fp2_REM, path = save_path, filename = 'feat42_fp2_REM') Object_fp1_N3 =",
"'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1', REM_fname = 'tr90_fp1-M2_fp2-M1', saving = True,",
"channel = ch_fp2, fs = 200, T = 30) X_fp2_N3,y_fp2_N3",
"learning_rate=.1) Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome",
"ssccoorriinngg(filename='', channel='', fs = 200, T = 30) path =",
"{round(r1*100, 2)}+- {round(std1*100, 2)}') # xgb r2 = results_xgb[metric].mean() std2",
"2)}') # SVM r3 = results_SVM[metric].mean() std3 = results_SVM[metric].std() print(f'{metric}",
"= 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/', fname = 'feat42_N3_fp2-M1', feats = 'featureset', labels =",
"- REM X_REM = np.column_stack((X_rh_REM, X_lh_REM)) # Both sides -",
"'feat42_C4_N3') ########### Occipital electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without artefact/train_test/\" fname_O_N3",
"results_LR['test_f1_score']) dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data = results_RF['test_f1_score']) dset",
"ch_O1, fs = 200, T = 30) X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction()",
"Combining some REM and SWS epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch",
"X_lh_N3 = np.column_stack((X_lh_N3,X_O1_N3)) # Both sides - REM X_REM =",
"using logistic Random Forests y_pred_RF = Object.RandomForest_Modelling(X_train, y_train, X_test, y_test,",
"= 30) X_O1_REM,y_O1_REM = Object_O1_REM.FeatureExtraction() Object_O1_REM.SaveFeatureSet(X = X_O1_REM, y=y_O1_REM, path",
"# Combine them X_N3 = np.column_stack((X_rh_N3, X_lh_N3)) X_REM = np.column_stack((X_rh_REM,",
"Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T",
"= ch_O2, fs = 200, T = 30) X_O2_N3,y_O2_N3 =",
"= wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data = results_LR['test_f1_score']) dset = wf.create_dataset('f1_RF'",
"'test_precision', 'test_recall', 'test_f1_score'] for metric in Metrics: #RF r1 =",
"wf.create_dataset('prec_LR' , results_LR['test_precision'].shape, data = results_LR['test_precision']) dset = wf.create_dataset('prec_RF' ,",
"results_RF['test_precision'].shape, data = results_RF['test_precision']) dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data =",
"30) X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path =",
"# Recall dset = wf.create_dataset('rec_SVM', results_SVM['test_recall'].shape, data = results_SVM['test_recall']) dset",
"# Pick LEFT hemisphere REM fname_lh_REM = 'feat42_lh_REM' X_lh_REM, y_lh_REM",
"results_xgb[metric].mean() std2 = results_xgb[metric].std() print(f'{metric} for xgb is: {round(r2*100, 2)}+-",
"channel = ch_O2, fs = 200, T = 30) X_O2_REM,y_O2_REM",
"ML_Depression(filename=fname_C_REM, channel = ch_C4, fs = 200, T = 30)",
"{round(std3*100, 2)}') # LR r4 = results_LR[metric].mean() std4 = results_LR[metric].std()",
"= 1) #ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k = 80) #Recruisive",
"Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome measures #",
"results_RF['test_precision']) dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision']) # Recall",
"Acc, Recall, prec, f1_sc = Object.multi_label_confusion_matrix(y_test, y_pred_xgb) #%% Outcome measures",
"# f1-score dset = wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score']) dset",
"N3 fname_rh_N3 = 'feat42_rh_N3' X_rh_N3, y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats,",
"search to find the best config. of RF BestParams_RandomSearch, Bestsocre_RandomSearch",
"time.time() print(f'time taken: {toc - tic}') ########## Concatenate all features",
"save_path, filename = 'feat42_O2_REM') Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1,",
"max_features = ['log2', 'sqrt'], max_depth = [int(x) for x in",
"ML_Depression(filename=fname_O_REM, channel = ch_O1, fs = 200, T = 30)",
"X_O1_N3,y_O1_N3 = Object_O1_N3.FeatureExtraction() Object_O1_N3.SaveFeatureSet(X = X_O1_N3, y=y_O1_N3, path = save_path,",
"y_test, scoring = scoring, n_estimators = 500, cv = 10)",
"find the best config. of RF BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds,",
"'feat42_O2_REM') Object_O1_N3 = ML_Depression(filename=fname_O_N3, channel = ch_O1, fs = 200,",
"10], min_samples_leaf = [1, 2, 4], bootstrap = [True, False],",
"Object_fp2_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp2, fs = 200, T",
"= 'tr90_fp1-M2_fp2-M1', saving = True, fname_save = 'tr90_N3&REM_fp1-M2') #%% How",
"feats, labels) # Pick left hemisphere N3 fname_lh_N3 = 'feat42_lh_N3'",
"path = save_path, filename = 'feat42_rh_N3') Object.SaveFeatureSet(X = X_lh_N3 ,",
"'featureset' labels = 'labels' # Pick right hemisphere N3 fname_rh_N3",
"from different brain regions, sleep stage and combine them Object",
"save_path, filename = 'feat42_fp2_REM') Object_fp1_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp1,",
"LEFT hemisphere - REM X_lh_REM = np.column_stack((X_fp1_REM,X_C3_REM)) X_lh_REM = np.column_stack((X_lh_REM,X_O1_REM))",
"REM X_rh_REM = np.column_stack((X_fp2_REM,X_C4_REM)) X_rh_REM = np.column_stack((X_rh_REM,X_O2_REM)) # RIGHT hemisphere",
"cv = 10) #%% Example save featureset path = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/features/'",
"4], bootstrap = [True, False], n_iter = 100, cv =",
"Object_fp1_REM = ML_Depression(filename=fname_fp_REM, channel = ch_fp1, fs = 200, T",
"= ['test_accuracy', 'test_precision', 'test_recall', 'test_f1_score'] for metric in Metrics: #RF",
"= ch_O1, fs = 200, T = 30) X_O1_REM,y_O1_REM =",
"y_rh_N3 = Object.LoadFeatureSet(path, fname_rh_N3, feats, labels) # Pick left hemisphere",
"= save_path, filename = 'feat42_l&rh_REM') # Both hemispheres- SWS &REM",
"= 200, T = 30) X_C4_REM,y_C4_REM = Object_C4_REM.FeatureExtraction() Object_C4_REM.SaveFeatureSet(X =",
"filename = 'feat42_fp2_N3') toc = time.time() print(f'time taken: {toc -",
"y, n_components = 5) # Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X,",
"PCA PCA_out = Object.FeatSelect_PCA(X, y, n_components = 5) # Boruta",
"Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth = 7) # Lasso Feat_selected_lasso",
"import numpy as np from sklearn.model_selection import cross_validate #%% Picking",
"y, k = 20) #### NOW TEST CLASSIFIERS WITH SELECTED",
"save_path, filename = 'feat42_C4_REM') Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3,",
"100, cv = 10) #%% Test feature selection methods ##",
"= X_fp1_REM, y=y_fp1_REM, path = save_path, filename = 'feat42_fp1_REM') Object_fp2_REM",
"['log2', 'sqrt'], max_depth = [int(x) for x in np.arange(10, 100,",
"= Object.RandomForest_Modelling(Feat_selected_Boruta, y, scoring = scoring, n_estimators = 200, cv",
"labels) # Test set fname = 'feat42_Fp1-Fp2_test' X_test, y_test =",
"X_fp1_N3,y_fp1_N3 = Object_fp1_N3.FeatureExtraction() Object_fp1_N3.SaveFeatureSet(X = X_fp1_N3, y=y_fp1_N3, path = save_path,",
"np.row_stack((X_N3, X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3, y_fp2_REM)) # SAVE ALL COMBINATIONS",
"y, max_depth = 7) # Lasso Feat_selected_lasso = Object.FeatSelect_LASSO(X, y,",
"LR is: {round(r4*100, 2)}+- {round(std4*100, 2)}') #%% Applying Randomized grid",
"save_path, filename = 'feat42_rh_N3') Object.SaveFeatureSet(X = X_lh_N3 , y=y_fp2_N3 ,",
"sklearn.model_selection import cross_validate #%% Picking featureset of interest and apply",
"y=y_fp2_REM, path = save_path, filename = 'feat42_lh_REM') Object.SaveFeatureSet(X = X_rh_N3",
"hemisphere REM fname_lh_REM = 'feat42_lh_REM' X_lh_REM, y_lh_REM = Object.LoadFeatureSet(path, fname_lh_REM,",
"= 'feat42_lh_N3' X_lh_N3, y_lh_N3 = Object.LoadFeatureSet(path, fname_lh_N3, feats, labels) #",
"2)}+- {round(std3*100, 2)}') # LR r4 = results_LR[metric].mean() std4 =",
"results_LR['test_f1_score'].shape, data = results_LR['test_f1_score']) dset = wf.create_dataset('f1_RF' , results_RF['test_f1_score'].shape, data",
"filename = 'feat42_fp1_N3') Object_fp2_N3 = ML_Depression(filename=fname_fp_N3, channel = ch_fp2, fs",
"path = save_path, filename = 'feat42_rh_REM') Object.SaveFeatureSet(X = X_lh_REM, y=y_fp2_REM,",
"numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import",
"- tic}') ########## Concatenate all features ######### # RIGHT hemisphere",
"data = results_LR['test_accuracy']) dset = wf.create_dataset('acc_RF' , results_RF['test_accuracy'].shape, data =",
"'C:/PhD/ML in depression/' fname = 'feat42_Fp1-Fp2_train' feats = 'featureset' labels",
"= 30) X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM, y=y_fp1_REM, path",
"n_iter = 100, cv = 10) #%% Test feature selection",
"precision_score, recall_score, f1_score import h5py import time from ssccoorriinngg import",
"ch_O2, fs = 200, T = 30) X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction()",
"ranks_rec, Feat_selected_rec = Object.FeatSelect_Recrusive(X, y, k = 20) #### NOW",
"10) #%% Test feature selection methods ## # PCA PCA_out",
"= 'tr90_N3&REM_fp1-M2') #%% How to save some results? directory =",
"# Both hemisphere Object.SaveFeatureSet(X = X_N3 , y=y_fp2_N3 , path",
"wf.create_dataset('f1_SVM', results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape,",
"channel = ch_O2, fs = 200, T = 30) X_O2_N3,y_O2_N3",
"5) # Boruta ranks_Boruta, Feat_selected_Boruta = Object.FeatSelect_Boruta(X, y, max_depth =",
"'feat42_rh_REM' X_rh_REM, y_rh_REM = Object.LoadFeatureSet(path, fname_rh_REM, feats, labels) # Pick",
"= 200, T = 30) X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() Object_fp1_REM.SaveFeatureSet(X =",
"filename = 'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C4, fs",
"path = save_path, filename = 'feat42_O1_REM') Object_O2_REM = ML_Depression(filename=fname_O_REM, channel",
"data = results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data =",
"wf.create_dataset('prec_RF' , results_RF['test_precision'].shape, data = results_RF['test_precision']) dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape,",
"toc = time.time() print(f'time taken: {toc - tic}') ########## Concatenate",
"'feat42_l&rh_REM') # Both hemispheres- SWS &REM combination Object.SaveFeatureSet(X = X_SWS_REM",
"= Object.XGB_Modelling(X_train, y_train,X_test, y_test, scoring, n_estimators = 1000, cv =",
"= results_RF['test_precision']) dset = wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision']) #",
"y, C = 1) #ANOVA Feat_selected_ANOVA = Object.FeatSelect_ANOVA(X,y, k =",
"T = 30) # one hemisphere Object.SaveFeatureSet(X = X_rh_REM, y=y_fp2_REM,",
"= results_RF['test_f1_score']) dset = wf.create_dataset('f1_xgb', results_xgb['test_f1_score'].shape, data = results_xgb['test_f1_score']) #%%",
"results_SVM['test_f1_score'].shape, data = results_SVM['test_f1_score']) dset = wf.create_dataset('f1_LR' , results_LR['test_f1_score'].shape, data",
"ch_C3 = 'C3-M2' Object_C3_REM = ML_Depression(filename=fname_C_REM, channel = ch_C3, fs",
"'feat42_l&rh_N3') Object.SaveFeatureSet(X = X_REM , y=y_fp2_N3 , path = save_path,",
"'feat42_Fp1-Fp2_test' X_test, y_test = Object.LoadFeatureSet(path, fname, feats, labels) # Define",
"epochs Object.CombineEpochs(directory = 'P:/3013080.02/ml_project/scripts/1D_TimeSeries/train_test/', ch = 'fp1-M2', N3_fname = 'tr90_N3_fp1-M2_fp2-M1',",
"Picking featureset of interest and apply classification Object = ssccoorriinngg(filename='',",
"= save_path, filename = 'feat42_C3_N3') Object_C4_N3 = ML_Depression(filename=fname_C_N3, channel =",
"200, T = 30) X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X = X_C3_REM,",
"200, T = 30) X_O2_REM,y_O2_REM = Object_O2_REM.FeatureExtraction() Object_O2_REM.SaveFeatureSet(X = X_O2_REM,",
"Object.SaveFeatureSet(X = X_SWS_REM , y=y_SWS_REM , path = save_path, filename",
"the best config. of RF BestParams_RandomSearch, Bestsocre_RandomSearch ,means, stds, params=",
"200, T = 30) X_fp1_REM,y_fp1_REM = Object_fp1_REM.FeatureExtraction() Object_fp1_REM.SaveFeatureSet(X = X_fp1_REM,",
"= 200, T = 30) X_C3_REM,y_C3_REM = Object_C3_REM.FeatureExtraction() Object_C3_REM.SaveFeatureSet(X =",
"them Object = ML_Depression(filename='', channel='', fs = 200, T =",
"y=y_fp2_N3, path = save_path, filename = 'feat42_fp2_N3') toc = time.time()",
"filename = 'feat42_C4_REM') Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel = ch_C3, fs",
"X_rh_N3 = np.column_stack((X_fp2_N3,X_C4_N3)) X_rh_N3 = np.column_stack((X_rh_N3,X_O2_N3)) # LEFT hemisphere -",
"ML_Depression(filename=fname_C_N3, channel = ch_C4, fs = 200, T = 30)",
"tic = time.time() ########### Central electrodes ############# main_path = \"D:/1D_TimeSeries/raw_EEG/without",
"save_path, filename = 'feat42_l&rh_N3&REM') #%% Load features from different brain",
"path = save_path, filename = 'feat42_C4_REM') Object_C3_N3 = ML_Depression(filename=fname_C_N3, channel",
"channel = ch_C3, fs = 200, T = 30) X_C3_N3,y_C3_N3",
"(main_path+\"tr90_N3_C3-M2_C4-M1.h5\") fname_C_REM = (main_path+\"tr90_REM_C3-M2_C4-M1.h5\") ch_C4 = 'C4-M1' ch_C3 = 'C3-M2'",
"= wf.create_dataset('prec_xgb', results_xgb['test_precision'].shape, data = results_xgb['test_precision']) # Recall dset =",
"SWS and REM X_SWS_REM = np.row_stack((X_N3, X_REM)) y_SWS_REM = np.concatenate((y_fp2_N3,",
"Pick LEFT hemisphere REM fname_lh_REM = 'feat42_lh_REM' X_lh_REM, y_lh_REM ="
],
[
"\"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices = [0, 1] parser = argparse.ArgumentParser(description=\"Run",
"log_writer = None if not disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if",
"vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst =",
"\"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer and not",
"scene caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print profiling data.\")",
"\"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\",",
") parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number of steps to record",
"\"scenes\", \"background\", \"urban_street_01.jpg\") # VR rendering settings vr_rendering_settings = MeshRendererSettings(",
"= \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path,",
"or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if",
"= Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 /",
"True if not disable_scene_cache: scene_kwargs = { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task,",
"(and filename) of vr log\") parser.add_argument( \"--scene\", type=str, choices=scene_choices, nargs=\"?\",",
"integer ID, matching suffix of bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str, help=\"Path",
"help=\"BDDL integer ID, matching suffix of bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str,",
"scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): # HDR",
"steps to record before stopping.\") return parser.parse_args() def main(): args",
"= os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\") # VR rendering settings vr_rendering_settings",
"disable_save: log_writer.process_frame() if task_done: post_task_steps -= 1 if post_task_steps ==",
"else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action =",
"an ATUS demo\") parser.add_argument( \"--task\", type=str, required=True, nargs=\"?\", help=\"Name of",
") log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps =",
"30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling",
"if vr_log_path is None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp)",
"= np.zeros_like(action) vr_agent.update(action) if not no_vr: if satisfied_predicates != satisfied_predicates_cached:",
"no_vr else \"vr\" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1",
"= False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent",
"from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from",
"help=\"Name of ATUS activity matching parent folder in bddl.\" )",
"args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, )",
"\"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices = [0, 1]",
"def parse_args(): scene_choices = [ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\",",
"required=True, nargs=\"?\", help=\"Name of ATUS activity matching parent folder in",
"stopping.\") return parser.parse_args() def main(): args = parse_args() bddl.set_backend(\"iGibson\") collect_demo(",
"satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"):",
"parse_args(): scene_choices = [ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\",",
"save random actions.\" ) parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number of",
"action = np.zeros((28,)) action[19] = 1 action[27] = 1 else:",
"if post_task_steps == 0: break steps += 1 if log_writer",
"os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\") # VR rendering settings vr_rendering_settings =",
"help=\"Whether to disable saving logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to",
"disable saving logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable using",
"[ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\",",
"# HDR files for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\",",
"satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps",
"max_steps < 0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates",
"igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0]",
"= satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state()",
") parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename) of vr log\") parser.add_argument(",
"\"background\", \"probe_03.hdr\") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\"",
"from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200",
"satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"):",
"help=\"Whether to turn off VR recording and save random actions.\"",
"vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None",
"matching suffix of bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename)",
"physics_timestep=1 / 300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task,",
"mode = \"headless\" if no_vr else \"vr\" s = Simulator(",
"rendering hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path,",
"hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path,",
"max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if steps",
"parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable using pre-initialized scene caches.\"",
"def collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False,",
"\"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ]",
"= copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps",
"\"\"\" Main BEHAVIOR demo collection entrypoint \"\"\" import argparse import",
"no_vr=False, disable_scene_cache=False, profile=False, ): # HDR files for PBR rendering",
"not no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates",
"= igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action)",
"MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator import Simulator",
"no_vr: if steps < 2: action = np.zeros((28,)) action[19] =",
"None online_sampling = True if not disable_scene_cache: scene_kwargs = {",
"\"--task\", type=str, required=True, nargs=\"?\", help=\"Name of ATUS activity matching parent",
"name/ID matching iGibson interactive scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to",
"Main BEHAVIOR demo collection entrypoint \"\"\" import argparse import copy",
"import argparse import copy import datetime import os import bddl",
"VrSettings from igibson.simulator import Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS",
"scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr:",
"\"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices",
"/ 300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id)",
"POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices =",
"# VR system settings mode = \"headless\" if no_vr else",
"\"vr\" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0,",
"mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 / 30.0, )",
"settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename,",
"1 if post_task_steps == 0: break steps += 1 if",
"action=\"store_true\", help=\"Whether to turn off VR recording and save random",
"type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching iGibson interactive scenes.\" )",
"= argparse.ArgumentParser(description=\"Run and collect an ATUS demo\") parser.add_argument( \"--task\", type=str,",
"enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR system settings mode",
"disable_scene_cache: scene_kwargs = { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), } online_sampling",
"1 else: action = np.random.uniform(-0.01, 0.01, size=(28,)) else: action =",
"= 1 else: action = np.random.uniform(-0.01, 0.01, size=(28,)) else: action",
"\"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename",
"vr_agent.update(action) if not no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached",
"of steps to record before stopping.\") return parser.parse_args() def main():",
"= os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\",",
"os import bddl import numpy as np import igibson from",
"max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): # HDR files for PBR",
"filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps",
"VrConditionSwitcher, VrSettings from igibson.simulator import Simulator from igibson.utils.ig_logging import IGLogWriter",
"\"background\", \"urban_street_01.jpg\") # VR rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True,",
"if no_vr else \"vr\" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True),",
"help=\"Path (and filename) of vr log\") parser.add_argument( \"--scene\", type=str, choices=scene_choices,",
"action[19] = 1 action[27] = 1 else: action = np.random.uniform(-0.01,",
"saving logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable using pre-initialized",
"BEHAVIOR demo collection entrypoint \"\"\" import argparse import copy import",
") igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling =",
"): # HDR files for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path,",
") # VR system settings mode = \"headless\" if no_vr",
"to disable saving logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable",
"data.\") parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether to turn off VR recording",
"\"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer and not disable_save: log_writer.process_frame() if task_done:",
"MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False,",
"= \"headless\" if no_vr else \"vr\" s = Simulator( mode=mode,",
"return parser.parse_args() def main(): args = parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task,",
"= np.zeros((28,)) action[19] = 1 action[27] = 1 else: action",
"igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator import Simulator from igibson.utils.ig_logging",
"\"probe_03.hdr\") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\" )",
"default=-1, help=\"Maximum number of steps to record before stopping.\") return",
"steps += 1 if log_writer and not disable_save: log_writer.end_log_session() s.disconnect()",
"action = np.random.uniform(-0.01, 0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if",
"PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2 =",
"= True if not disable_scene_cache: scene_kwargs = { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene,",
"choices=task_id_choices, nargs=\"?\", help=\"BDDL integer ID, matching suffix of bddl.\", )",
"IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices",
"to print profiling data.\") parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether to turn",
"{ \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator(",
"= copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps < 0 or",
"0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success()",
"= datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id,",
"args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo( task, task_id, scene, vr_log_path=None,",
"type=int, required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL integer ID, matching suffix of",
"no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if",
"< physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if not no_vr: if",
"files for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\")",
"steps < 2: action = np.zeros((28,)) action[19] = 1 action[27]",
"= os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\" ) background_texture =",
"optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0,",
"help=\"Whether to disable using pre-initialized scene caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\",",
"online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs =",
"Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS =",
"args = parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save,",
"\"headless\" if no_vr else \"vr\" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings,",
"while max_steps < 0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done,",
"= 0 while max_steps < 0 or steps < max_steps:",
"task, task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs,",
"<reponame>suresh-guttikonda/iGibson \"\"\" Main BEHAVIOR demo collection entrypoint \"\"\" import argparse",
"bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename) of vr log\")",
"collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False,",
"else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {}",
"if no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached",
"VR recording and save random actions.\" ) parser.add_argument(\"--max_steps\", type=int, default=-1,",
"ATUS activity matching parent folder in bddl.\" ) parser.add_argument( \"--task_id\",",
"import VrConditionSwitcher, VrSettings from igibson.simulator import Simulator from igibson.utils.ig_logging import",
"igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if",
"\"\"\" import argparse import copy import datetime import os import",
"< 0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates =",
"iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings",
"argparse.ArgumentParser(description=\"Run and collect an ATUS demo\") parser.add_argument( \"--task\", type=str, required=True,",
"argparse import copy import datetime import os import bddl import",
"recording and save random actions.\" ) parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum",
"igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling = True",
"iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling = True if not",
"igibson.simulator import Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200",
"profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS)",
"caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print profiling data.\") parser.add_argument(",
"steps < physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if not no_vr:",
"store_vr=False if no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage()",
"\"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching iGibson interactive scenes.\"",
"profiling data.\") parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether to turn off VR",
"\"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\",",
"suffix of bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename) of",
"if no_vr: if steps < 2: action = np.zeros((28,)) action[19]",
"in bddl.\" ) parser.add_argument( \"--task_id\", type=int, required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL",
"datetime import os import bddl import numpy as np import",
") vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs = VrConditionSwitcher(",
"s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else True, vr_robot=vr_agent, profiling_mode=profile,",
"= 200 def parse_args(): scene_choices = [ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\",",
") def collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False,",
"if steps < physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if not",
"+= 1 if log_writer and not disable_save: log_writer.end_log_session() s.disconnect() if",
"igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator",
"disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is None: vr_log_path =",
"post_task_steps == 0: break steps += 1 if log_writer and",
"import os import bddl import numpy as np import igibson",
"import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher,",
") background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\") # VR rendering",
"False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent =",
"\"--task_id\", type=int, required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL integer ID, matching suffix",
"turn off VR recording and save random actions.\" ) parser.add_argument(\"--max_steps\",",
"type=str, required=True, nargs=\"?\", help=\"Name of ATUS activity matching parent folder",
"igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None if not disable_save: timestamp",
"of bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename) of vr",
") parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable saving logfiles.\") parser.add_argument( \"--disable_scene_cache\",",
"VR rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2,",
"VR system settings mode = \"headless\" if no_vr else \"vr\"",
"post_task_steps -= 1 if post_task_steps == 0: break steps +=",
"parser.parse_args() def main(): args = parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id,",
"is None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp) log_writer =",
"igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if steps <",
"] task_id_choices = [0, 1] parser = argparse.ArgumentParser(description=\"Run and collect",
"demo\") parser.add_argument( \"--task\", type=str, required=True, nargs=\"?\", help=\"Name of ATUS activity",
"log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS)",
"no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer =",
"< 2: action = np.zeros((28,)) action[19] = 1 action[27] =",
"0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps:",
"scene_choices = [ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\",",
"bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache,",
"nargs=\"?\", help=\"Name of ATUS activity matching parent folder in bddl.\"",
"os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\")",
"activity matching parent folder in bddl.\" ) parser.add_argument( \"--task_id\", type=int,",
"not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer",
"args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo( task, task_id, scene,",
"igbhvr_act_inst.iterate_instruction ) log_writer = None if not disable_save: timestamp =",
"HDR files for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\",",
") log_writer = None if not disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")",
"\"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename = os.path.join(",
"entrypoint \"\"\" import argparse import copy import datetime import os",
"scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable saving logfiles.\") parser.add_argument(",
"logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable using pre-initialized scene",
"np import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import",
"if not no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached =",
"if log_writer and not disable_save: log_writer.process_frame() if task_done: post_task_steps -=",
"env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) #",
"import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings",
"if steps < 2: action = np.zeros((28,)) action[19] = 1",
"system settings mode = \"headless\" if no_vr else \"vr\" s",
"import datetime import os import bddl import numpy as np",
"{} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0",
"\"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices = [0, 1] parser =",
"igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import",
"action=\"store_true\", help=\"Whether to disable using pre-initialized scene caches.\" ) parser.add_argument(\"--profile\",",
"enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR system settings mode =",
"vr log\") parser.add_argument( \"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching",
"online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling )",
"vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer and not disable_save:",
"demo collection entrypoint \"\"\" import argparse import copy import datetime",
"task_id_choices = [0, 1] parser = argparse.ArgumentParser(description=\"Run and collect an",
"task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True,",
"collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile,",
"import Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS",
"parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and filename) of vr log\") parser.add_argument( \"--scene\",",
"log\") parser.add_argument( \"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching iGibson",
"1 action[27] = 1 else: action = np.random.uniform(-0.01, 0.01, size=(28,))",
"vr_log_path is None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp) log_writer",
"vr_cs.toggle_show_state() if log_writer and not disable_save: log_writer.process_frame() if task_done: post_task_steps",
"= igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction,",
"type=int, default=-1, help=\"Maximum number of steps to record before stopping.\")",
"igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\" ) background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\",",
"args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo( task,",
"random actions.\" ) parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number of steps",
"post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while",
"physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps < 0",
"\"--no_vr\", action=\"store_true\", help=\"Whether to turn off VR recording and save",
"as np import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu",
"if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer and not disable_save: log_writer.process_frame()",
"if not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction )",
"collect an ATUS demo\") parser.add_argument( \"--task\", type=str, required=True, nargs=\"?\", help=\"Name",
"disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): # HDR files for",
"= { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), } online_sampling = False",
"no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached =",
"200 PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices = [ \"Beechwood_0_int\",",
"2: action = np.zeros((28,)) action[19] = 1 action[27] = 1",
"\"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\",",
"\"background\", \"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename =",
"import copy import datetime import os import bddl import numpy",
"ATUS demo\") parser.add_argument( \"--task\", type=str, required=True, nargs=\"?\", help=\"Name of ATUS",
"igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction",
"and not disable_save: log_writer.process_frame() if task_done: post_task_steps -= 1 if",
"log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True,",
"\"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices = [0, 1] parser",
"from igibson.simulator import Simulator from igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS =",
"choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching iGibson interactive scenes.\" ) parser.add_argument(\"--disable_save\",",
"PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices = [ \"Beechwood_0_int\", \"Beechwood_1_int\",",
"from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator import Simulator from",
"= np.random.uniform(-0.01, 0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps",
"and not disable_save: log_writer.end_log_session() s.disconnect() if __name__ == \"__main__\": main()",
"os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\" ) background_texture = os.path.join(igibson.ig_dataset_path,",
"log_writer and not disable_save: log_writer.end_log_session() s.disconnect() if __name__ == \"__main__\":",
"[0, 1] parser = argparse.ArgumentParser(description=\"Run and collect an ATUS demo\")",
"scene_kwargs = None online_sampling = True if not disable_scene_cache: scene_kwargs",
"parser.add_argument( \"--task\", type=str, required=True, nargs=\"?\", help=\"Name of ATUS activity matching",
"args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo(",
"action=\"store_true\", help=\"Whether to print profiling data.\") parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether",
"vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs = VrConditionSwitcher( igbhvr_act_inst.simulator,",
"== 0: break steps += 1 if log_writer and not",
"msaa=False, light_dimming_factor=1.0, ) # VR system settings mode = \"headless\"",
"parser = argparse.ArgumentParser(description=\"Run and collect an ATUS demo\") parser.add_argument( \"--task\",",
"env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR system",
"help=\"Whether to print profiling data.\") parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether to",
"vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\",",
"igibson.utils.ig_logging import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200 def",
"import numpy as np import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance",
"1] parser = argparse.ArgumentParser(description=\"Run and collect an ATUS demo\") parser.add_argument(",
"= [0, 1] parser = argparse.ArgumentParser(description=\"Run and collect an ATUS",
"matching parent folder in bddl.\" ) parser.add_argument( \"--task_id\", type=int, required=True,",
"os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\",",
"copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps <",
"200 def parse_args(): scene_choices = [ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\",",
"action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action = np.zeros_like(action)",
"\"Wainscott_1_int\", ] task_id_choices = [0, 1] parser = argparse.ArgumentParser(description=\"Run and",
"copy.deepcopy(PHYSICS_WARMING_STEPS) steps = 0 while max_steps < 0 or steps",
"scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if not",
"\"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\",",
"if task_done: post_task_steps -= 1 if post_task_steps == 0: break",
"igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None if not disable_save:",
"disable_scene_cache=False, profile=False, ): # HDR files for PBR rendering hdr_texture",
"pre-initialized scene caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print profiling",
"required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL integer ID, matching suffix of bddl.\",",
"if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\",",
"\"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices =",
"to disable using pre-initialized scene caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether",
"break steps += 1 if log_writer and not disable_save: log_writer.end_log_session()",
"ID, matching suffix of bddl.\", ) parser.add_argument(\"--vr_log_path\", type=str, help=\"Path (and",
"copy import datetime import os import bddl import numpy as",
"fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, )",
"folder in bddl.\" ) parser.add_argument( \"--task_id\", type=int, required=True, choices=task_id_choices, nargs=\"?\",",
"igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer and",
"physics_warming_steps: action = np.zeros_like(action) vr_agent.update(action) if not no_vr: if satisfied_predicates",
"\"urban_street_01.jpg\") # VR rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False,",
"\"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst,",
"matching iGibson interactive scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable",
"scene, timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if",
"VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None if not",
"and collect an ATUS demo\") parser.add_argument( \"--task\", type=str, required=True, nargs=\"?\",",
"= IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else True,",
"np.zeros((28,)) action[19] = 1 action[27] = 1 else: action =",
"task_id) scene_kwargs = None online_sampling = True if not disable_scene_cache:",
"= os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, \"scenes\",",
"0 while max_steps < 0 or steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile)",
"np.random.uniform(-0.01, 0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps <",
"\"floor_lighttype_0.png\" ) background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\") # VR",
"Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 / 30.0,",
"task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if steps < 2:",
"if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer",
"help=\"Scene name/ID matching iGibson interactive scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether",
"not disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is None: vr_log_path",
"= None online_sampling = True if not disable_scene_cache: scene_kwargs =",
"= None if not disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path",
"\"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s,",
"action=\"store_true\", help=\"Whether to disable saving logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\", help=\"Whether",
"if not disable_scene_cache: scene_kwargs = { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id),",
"using pre-initialized scene caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print",
"parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr,",
"parser.add_argument( \"--task_id\", type=int, required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL integer ID, matching",
"and save random actions.\" ) parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number",
"render_timestep=1 / 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs =",
"not disable_scene_cache: scene_kwargs = { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), }",
"parent folder in bddl.\" ) parser.add_argument( \"--task_id\", type=int, required=True, choices=task_id_choices,",
"to turn off VR recording and save random actions.\" )",
"import bddl import numpy as np import igibson from igibson.activity.activity_base",
"actions.\" ) parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number of steps to",
"np.zeros_like(action) vr_agent.update(action) if not no_vr: if satisfied_predicates != satisfied_predicates_cached: vr_cs.refresh_condition(switch=False)",
"satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if",
"# VR rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture,",
"igbhvr_act_inst.check_success() if no_vr: if steps < 2: action = np.zeros((28,))",
"= {} post_task_steps = copy.deepcopy(POST_TASK_STEPS) physics_warming_steps = copy.deepcopy(PHYSICS_WARMING_STEPS) steps =",
"\"Rs_int\", \"layout\", \"floor_lighttype_0.png\" ) background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\")",
"type=str, help=\"Path (and filename) of vr log\") parser.add_argument( \"--scene\", type=str,",
"\"--disable_scene_cache\", action=\"store_true\", help=\"Whether to disable using pre-initialized scene caches.\" )",
"s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1",
"of vr log\") parser.add_argument( \"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID",
"else \"vr\" s = Simulator( mode=mode, rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 /",
"number of steps to record before stopping.\") return parser.parse_args() def",
"bddl.\" ) parser.add_argument( \"--task_id\", type=int, required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL integer",
"settings mode = \"headless\" if no_vr else \"vr\" s =",
"\"layout\", \"floor_lighttype_0.png\" ) background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\") #",
"IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else True, vr_robot=vr_agent,",
"scene_kwargs = { \"urdf_file\": \"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), } online_sampling =",
"} online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling",
"!= satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition()",
"from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr",
"log_writer.process_frame() if task_done: post_task_steps -= 1 if post_task_steps == 0:",
"import IGLogWriter POST_TASK_STEPS = 200 PHYSICS_WARMING_STEPS = 200 def parse_args():",
"task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ):",
"\"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\" ) background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\",",
"igbhvr_act_inst.simulator.query_vr_event(\"left_controller\", \"overlay_toggle\"): vr_cs.toggle_show_state() if log_writer and not disable_save: log_writer.process_frame() if",
"env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR",
"task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): #",
"online_sampling = True if not disable_scene_cache: scene_kwargs = { \"urdf_file\":",
"300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs",
"import MeshRendererSettings from igibson.render.mesh_renderer.mesh_renderer_vr import VrConditionSwitcher, VrSettings from igibson.simulator import",
"rendering settings vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture,",
"load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if not no_vr: vr_cs",
"args.disable_scene_cache, args.profile, ) def collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False,",
"hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2 = os.path.join(igibson.ig_dataset_path, \"scenes\",",
"= 200 PHYSICS_WARMING_STEPS = 200 def parse_args(): scene_choices = [",
"bddl import numpy as np import igibson from igibson.activity.activity_base import",
"\"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\", \"Pomaria_1_int\",",
"light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True, msaa=False, light_dimming_factor=1.0, ) # VR system settings",
"vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps =",
"\"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\", \"Merom_1_int\", \"Pomaria_0_int\",",
"light_dimming_factor=1.0, ) # VR system settings mode = \"headless\" if",
"1 if log_writer and not disable_save: log_writer.end_log_session() s.disconnect() if __name__",
"args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def",
"filename) of vr log\") parser.add_argument( \"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene",
"None if not disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is",
"True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, ) log_writer.set_up_data_storage() satisfied_predicates_cached = {} post_task_steps",
"not disable_save: log_writer.process_frame() if task_done: post_task_steps -= 1 if post_task_steps",
"of ATUS activity matching parent folder in bddl.\" ) parser.add_argument(",
"rendering_settings=vr_rendering_settings, vr_settings=VrSettings(use_vr=True), physics_timestep=1 / 300.0, render_timestep=1 / 30.0, ) igbhvr_act_inst",
") parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print profiling data.\") parser.add_argument( \"--no_vr\",",
"parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable saving logfiles.\") parser.add_argument( \"--disable_scene_cache\", action=\"store_true\",",
"help=\"Maximum number of steps to record before stopping.\") return parser.parse_args()",
"= MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True, enable_pbr=True,",
"print profiling data.\") parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether to turn off",
"0: break steps += 1 if log_writer and not disable_save:",
"collection entrypoint \"\"\" import argparse import copy import datetime import",
"task_id, scene, timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False",
"steps < max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr:",
"simulator=s, scene_id=scene, scene_kwargs=scene_kwargs, load_clutter=True, online_sampling=online_sampling ) vr_agent = igbhvr_act_inst.simulator.robots[0] if",
"light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\", \"floor_lighttype_0.png\" ) background_texture",
"before stopping.\") return parser.parse_args() def main(): args = parse_args() bddl.set_backend(\"iGibson\")",
"if log_writer and not disable_save: log_writer.end_log_session() s.disconnect() if __name__ ==",
"= iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None online_sampling = True if",
"\"scenes\", \"background\", \"probe_03.hdr\") light_modulation_map_filename = os.path.join( igibson.ig_dataset_path, \"scenes\", \"Rs_int\", \"layout\",",
"steps = 0 while max_steps < 0 or steps <",
"= VrConditionSwitcher( igbhvr_act_inst.simulator, igbhvr_act_inst.show_instruction, igbhvr_act_inst.iterate_instruction ) log_writer = None if",
"disable using pre-initialized scene caches.\" ) parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to",
"parser.add_argument(\"--max_steps\", type=int, default=-1, help=\"Maximum number of steps to record before",
"satisfied_predicates_cached: vr_cs.refresh_condition(switch=False) satisfied_predicates_cached = satisfied_predicates if igbhvr_act_inst.simulator.query_vr_event(\"right_controller\", \"overlay_toggle\"): vr_cs.refresh_condition() if",
"parser.add_argument( \"--no_vr\", action=\"store_true\", help=\"Whether to turn off VR recording and",
"-= 1 if post_task_steps == 0: break steps += 1",
"args.profile, ) def collect_demo( task, task_id, scene, vr_log_path=None, disable_save=False, max_steps=-1,",
"vr_rendering_settings = MeshRendererSettings( optimized=True, fullscreen=False, env_texture_filename=hdr_texture, env_texture_filename2=hdr_texture2, env_texture_filename3=background_texture, light_modulation_map_filename=light_modulation_map_filename, enable_shadow=True,",
"else: action = np.random.uniform(-0.01, 0.01, size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action()",
"\"Pomaria_0_int\", \"Pomaria_1_int\", \"Pomaria_2_int\", \"Rs_int\", \"Wainscott_0_int\", \"Wainscott_1_int\", ] task_id_choices = [0,",
"to record before stopping.\") return parser.parse_args() def main(): args =",
"size=(28,)) else: action = igbhvr_act_inst.simulator.gen_vr_robot_action() if steps < physics_warming_steps: action",
"log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr else",
"off VR recording and save random actions.\" ) parser.add_argument(\"--max_steps\", type=int,",
"action = np.zeros_like(action) vr_agent.update(action) if not no_vr: if satisfied_predicates !=",
"iGibson interactive scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable saving",
"igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRendererSettings from",
"if not disable_save: timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is None:",
"/ 30.0, ) igbhvr_act_inst = iGBEHAVIORActivityInstance(task, task_id) scene_kwargs = None",
"for PBR rendering hdr_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"probe_02.hdr\") hdr_texture2",
"log_writer and not disable_save: log_writer.process_frame() if task_done: post_task_steps -= 1",
"def main(): args = parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id, args.scene,",
") parser.add_argument( \"--task_id\", type=int, required=True, choices=task_id_choices, nargs=\"?\", help=\"BDDL integer ID,",
"parser.add_argument(\"--profile\", action=\"store_true\", help=\"Whether to print profiling data.\") parser.add_argument( \"--no_vr\", action=\"store_true\",",
"None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp) log_writer = IGLogWriter(",
"nargs=\"?\", help=\"Scene name/ID matching iGibson interactive scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\",",
"nargs=\"?\", help=\"BDDL integer ID, matching suffix of bddl.\", ) parser.add_argument(\"--vr_log_path\",",
"task=igbhvr_act_inst, store_vr=False if no_vr else True, vr_robot=vr_agent, profiling_mode=profile, filter_objects=True, )",
"background_texture = os.path.join(igibson.ig_dataset_path, \"scenes\", \"background\", \"urban_street_01.jpg\") # VR rendering settings",
"vr_log_path=None, disable_save=False, max_steps=-1, no_vr=False, disable_scene_cache=False, profile=False, ): # HDR files",
"vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene, timestamp) log_writer = IGLogWriter( s,",
"timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task,",
"main(): args = parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path,",
"= [ \"Beechwood_0_int\", \"Beechwood_1_int\", \"Benevolence_0_int\", \"Benevolence_1_int\", \"Benevolence_2_int\", \"Ihlen_0_int\", \"Ihlen_1_int\", \"Merom_0_int\",",
"interactive scenes.\" ) parser.add_argument(\"--disable_save\", action=\"store_true\", help=\"Whether to disable saving logfiles.\")",
"= parse_args() bddl.set_backend(\"iGibson\") collect_demo( args.task, args.task_id, args.scene, args.vr_log_path, args.disable_save, args.max_steps,",
"profile=False, ): # HDR files for PBR rendering hdr_texture =",
"task_done: post_task_steps -= 1 if post_task_steps == 0: break steps",
"record before stopping.\") return parser.parse_args() def main(): args = parse_args()",
"parser.add_argument( \"--scene\", type=str, choices=scene_choices, nargs=\"?\", help=\"Scene name/ID matching iGibson interactive",
"datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\") if vr_log_path is None: vr_log_path = \"{}_{}_{}_{}.hdf5\".format(task, task_id, scene,",
"numpy as np import igibson from igibson.activity.activity_base import iGBEHAVIORActivityInstance from",
"args.disable_save, args.max_steps, args.no_vr, args.disable_scene_cache, args.profile, ) def collect_demo( task, task_id,",
"timestamp) log_writer = IGLogWriter( s, log_filepath=vr_log_path, task=igbhvr_act_inst, store_vr=False if no_vr",
"< max_steps: igbhvr_act_inst.simulator.step(print_stats=profile) task_done, satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if",
"= 1 action[27] = 1 else: action = np.random.uniform(-0.01, 0.01,",
"\"{}_task_{}_{}_0_fixed_furniture\".format(scene, task, task_id), } online_sampling = False igbhvr_act_inst.initialize_simulator( simulator=s, scene_id=scene,",
"= igbhvr_act_inst.check_success() if no_vr: if steps < 2: action =",
"satisfied_predicates = igbhvr_act_inst.check_success() if no_vr: if steps < 2: action",
"action[27] = 1 else: action = np.random.uniform(-0.01, 0.01, size=(28,)) else:"
] |
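For reference, a minimal headless run of the collector above could look like the sketch below. It is an illustrative assumption, not part of the original script: the module name behavior_demo_collection and the activity name cleaning_microwave_oven are made up for the example.

# Hypothetical usage sketch: a short headless (no-VR) collection run calling
# collect_demo() directly. Module path and activity name are assumptions.
import bddl

from behavior_demo_collection import collect_demo  # assumed module name

bddl.set_backend("iGibson")
collect_demo(
    task="cleaning_microwave_oven",  # assumed BEHAVIOR activity name
    task_id=0,
    scene="Rs_int",
    no_vr=True,      # take the scripted/random-action branch instead of VR input
    max_steps=100,   # stop after a short recording
)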
<gh_stars>1-10
from django.utils.text import slugify
from django.utils.html import format_html


class MenuItem(object):
    def __init__(self, label, url, name=None, classnames='', order=1000):
        self.label = label
        self.url = url
        self.classnames = classnames
        self.name = (name or slugify(unicode(label)))
        self.order = order

    def render_html(self):
        return format_html(
            u"""<li class="menu-{0}"><a href="{1}" class="{2}">{3}</a></li>""",
            self.name, self.url, self.classnames, self.label)
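A minimal usage sketch for the class above; the label, URL, and class names are illustrative values. Note that unicode() is a Python 2 built-in, so a Python 3 port would slugify str(label) instead.

# Illustrative only: build an item and render it as an <li> element.
item = MenuItem("Site settings", "/admin/settings/", classnames="icon icon-cog", order=100)
print(item.render_html())
# Expected output (label slugified into the menu class):
# <li class="menu-site-settings"><a href="/admin/settings/" class="icon icon-cog">Site settings</a></li>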
# Generated by Django 2.1.5 on 2019-03-26 11:35

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='U2FKey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_used_at', models.DateTimeField(null=True)),
                ('public_key', models.TextField(unique=True)),
                ('key_handle', models.TextField()),
                ('app_id', models.TextField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserOTP',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),
                ('secret_key', models.CharField(blank=True, max_length=100)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='UserRecoveryCodes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('secret_code', models.CharField(max_length=10)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),
            ],
        ),
    ]
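The migration above is easier to read as the models it creates. The following sketch is reverse-engineered from the CreateModel operations for illustration only; it is an assumption about the shape of the models, not the actual django_mfa source.

# Rough model sketch implied by the migration above (illustrative, not the
# real django_mfa models module). Field options mirror the CreateModel calls.
from django.conf import settings
from django.db import models


class U2FKey(models.Model):
    created_at = models.DateTimeField(auto_now_add=True)
    last_used_at = models.DateTimeField(null=True)
    public_key = models.TextField(unique=True)
    key_handle = models.TextField()
    app_id = models.TextField()
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="u2f_keys")


class UserOTP(models.Model):
    otp_type = models.CharField(choices=[("HOTP", "hotp"), ("TOTP", "totp")], max_length=20)
    secret_key = models.CharField(blank=True, max_length=100)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.PROTECT)


class UserRecoveryCodes(models.Model):
    secret_code = models.CharField(max_length=10)
    user = models.ForeignKey(UserOTP, on_delete=models.CASCADE)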
[
"\"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\") logger.now().with_field(\"key\", \"val\").error(\"with field1\") logger.now().with_field(\"key\", \"val\").error(\"with",
"my_collection import logger if __name__ == \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\")",
"if __name__ == \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\") logger.now().with_field(\"key\", \"val\").error(\"with",
"logger if __name__ == \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\") logger.now().with_field(\"key\",",
"from my_collection import logger if __name__ == \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\")",
"logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\") logger.now().with_field(\"key\", \"val\").error(\"with field1\") logger.now().with_field(\"key\", \"val\").error(\"with field2\")",
"import logger if __name__ == \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\")",
"__name__ == \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\") logger.now().with_field(\"key\", \"val\").error(\"with field1\")",
"== \"__main__\": logger.now().debug(\"debug1\") logger.now().debug(\"debug2\") logger.now().info(\"hello1\") logger.now().info(\"hello2\") logger.now().with_field(\"key\", \"val\").error(\"with field1\") logger.now().with_field(\"key\","
] |
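The snippet above only shows call sites; the now()/with_field() chain implies a small builder-style API. Below is a minimal sketch of a logger module with that shape. It is a guess at the interface, not the actual my_collection implementation; the class name and the output format are assumptions.

import sys
import datetime


class _Entry:
    """One log line being built: now() stamps it, with_field() adds context."""

    def __init__(self):
        self.time = datetime.datetime.now()
        self.fields = {}

    def with_field(self, key, value):
        self.fields[key] = value
        return self

    def _emit(self, level, msg):
        extra = " ".join(f"{k}={v}" for k, v in self.fields.items())
        print(f"{self.time.isoformat()} {level} {msg} {extra}".rstrip(), file=sys.stderr)

    def debug(self, msg):
        self._emit("DEBUG", msg)

    def info(self, msg):
        self._emit("INFO", msg)

    def error(self, msg):
        self._emit("ERROR", msg)


def now():
    return _Entry()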
[
"parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3 -m robotframework_iperf3' # add",
"parser.add_argument( \"-p\", \"--port\", type=int, help=\"server listen port\", default=8270) args =",
"# add parser options parser.add_argument( \"-a\", \"--address\", type=str, help=\"server listen",
"from .iperf3 import Iperf3 if __name__ == '__main__': # create",
"argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3 -m robotframework_iperf3' # add parser options",
"listen port\", default=8270) args = parser.parse_args() server = RobotRemoteServer( Iperf3(),",
"type=int, help=\"server listen port\", default=8270) args = parser.parse_args() server =",
"help=\"server listen address\", default='0.0.0.0') parser.add_argument( \"-p\", \"--port\", type=int, help=\"server listen",
"\"-p\", \"--port\", type=int, help=\"server listen port\", default=8270) args = parser.parse_args()",
"import RobotRemoteServer from .iperf3 import Iperf3 if __name__ == '__main__':",
"options parser.add_argument( \"-a\", \"--address\", type=str, help=\"server listen address\", default='0.0.0.0') parser.add_argument(",
"default=8270) args = parser.parse_args() server = RobotRemoteServer( Iperf3(), host=args.address, port=args.port",
"import argparse from robotremoteserver import RobotRemoteServer from .iperf3 import Iperf3",
"address\", default='0.0.0.0') parser.add_argument( \"-p\", \"--port\", type=int, help=\"server listen port\", default=8270)",
"parser options parser.add_argument( \"-a\", \"--address\", type=str, help=\"server listen address\", default='0.0.0.0')",
"default='0.0.0.0') parser.add_argument( \"-p\", \"--port\", type=int, help=\"server listen port\", default=8270) args",
"-m robotframework_iperf3' # add parser options parser.add_argument( \"-a\", \"--address\", type=str,",
"Iperf3 if __name__ == '__main__': # create commandline parser parser",
"robotremoteserver import RobotRemoteServer from .iperf3 import Iperf3 if __name__ ==",
"robotframework_iperf3' # add parser options parser.add_argument( \"-a\", \"--address\", type=str, help=\"server",
"= argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3 -m robotframework_iperf3' # add parser",
"RobotRemoteServer from .iperf3 import Iperf3 if __name__ == '__main__': #",
"if __name__ == '__main__': # create commandline parser parser =",
"parser.add_argument( \"-a\", \"--address\", type=str, help=\"server listen address\", default='0.0.0.0') parser.add_argument( \"-p\",",
"add parser options parser.add_argument( \"-a\", \"--address\", type=str, help=\"server listen address\",",
"# create commandline parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3",
"import Iperf3 if __name__ == '__main__': # create commandline parser",
"<gh_stars>0 import argparse from robotremoteserver import RobotRemoteServer from .iperf3 import",
"argparse from robotremoteserver import RobotRemoteServer from .iperf3 import Iperf3 if",
"'__main__': # create commandline parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog =",
"from robotremoteserver import RobotRemoteServer from .iperf3 import Iperf3 if __name__",
"'python3 -m robotframework_iperf3' # add parser options parser.add_argument( \"-a\", \"--address\",",
"create commandline parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3 -m",
"parser.prog = 'python3 -m robotframework_iperf3' # add parser options parser.add_argument(",
"\"--port\", type=int, help=\"server listen port\", default=8270) args = parser.parse_args() server",
"type=str, help=\"server listen address\", default='0.0.0.0') parser.add_argument( \"-p\", \"--port\", type=int, help=\"server",
"parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3 -m robotframework_iperf3' #",
"port\", default=8270) args = parser.parse_args() server = RobotRemoteServer( Iperf3(), host=args.address,",
"__name__ == '__main__': # create commandline parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)",
"commandline parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog = 'python3 -m robotframework_iperf3'",
"listen address\", default='0.0.0.0') parser.add_argument( \"-p\", \"--port\", type=int, help=\"server listen port\",",
"== '__main__': # create commandline parser parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.prog",
"\"--address\", type=str, help=\"server listen address\", default='0.0.0.0') parser.add_argument( \"-p\", \"--port\", type=int,",
"args = parser.parse_args() server = RobotRemoteServer( Iperf3(), host=args.address, port=args.port )",
"\"-a\", \"--address\", type=str, help=\"server listen address\", default='0.0.0.0') parser.add_argument( \"-p\", \"--port\",",
"= 'python3 -m robotframework_iperf3' # add parser options parser.add_argument( \"-a\",",
"= parser.parse_args() server = RobotRemoteServer( Iperf3(), host=args.address, port=args.port ) server.serve()",
"help=\"server listen port\", default=8270) args = parser.parse_args() server = RobotRemoteServer(",
".iperf3 import Iperf3 if __name__ == '__main__': # create commandline"
] |
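Since RobotRemoteServer exposes the wrapped library over XML-RPC, a quick way to verify the server above is running is to ask it for its keyword names. A small sketch, assuming the server is listening on the default port 8270 on the local machine:

import xmlrpc.client

# The Robot Framework remote-library protocol is plain XML-RPC:
# get_keyword_names() lists the keywords the Iperf3 library exposes.
remote = xmlrpc.client.ServerProxy("http://127.0.0.1:8270")
print(remote.get_keyword_names())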
[
"-0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86 # -0.95",
"0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42 # -0.53",
"-0.525 2.155 -0.841 -0.19 0.13 0.63 # 0.94 1.192 -1.075",
"-0.516 -1.598 -1.346 0.711 1.09 0.05 0.63 # 1.04 -0.281",
"0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25 # 0.58 -0.516",
"0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09 # 1.42",
"A B C D A0 B0 C0 D0 # -----",
"0.959 -0.009 -0.47 0.41 -0.52 # 0.49 0.170 0.124 -0.170",
"0.13 0.63 # 0.94 1.192 -1.075 0.017 0.167 0.54 0.52",
"0.52 1.42 # -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56",
"0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63 # 1.04",
"# 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25 #",
"-1.25 # 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63",
"0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09 # 1.42 -0.409",
"----- # -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86",
"# -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86 #",
"-0.456 -0.315 1.10 1.38 -0.05 # # [100 rows x",
"0.167 0.54 0.52 1.42 # -0.53 0.777 -1.090 -2.237 -0.693",
"-0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52 # 0.49 0.170",
"-1.598 -1.346 0.711 1.09 0.05 0.63 # 1.04 -0.281 -0.411",
"D A0 B0 C0 D0 # ----- ------ ------ ------",
"----- ------ ------ ------ ------ ----- ----- ----- # -0.09",
"0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86 # -0.95 0.669",
"-0.86 # -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27",
"-0.351 1.66 -2.32 -0.86 # -0.95 0.669 0.664 1.535 -0.633",
"0.271 -0.351 1.66 -2.32 -0.86 # -0.95 0.669 0.664 1.535",
"D0 # ----- ------ ------ ------ ------ ----- ----- -----",
"-1.78 0.32 1.27 # 0.17 0.657 0.970 -0.419 -1.413 -0.51",
"# -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45 #",
"----- ----- # -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32",
"-0.693 0.24 -0.56 1.45 # 0.34 -0.456 -1.220 -0.456 -0.315",
"-2.237 -0.693 0.24 -0.56 1.45 # 0.34 -0.456 -1.220 -0.456",
"0.664 1.535 -0.633 -1.78 0.32 1.27 # 0.17 0.657 0.970",
"-0.52 # 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09",
"C D A0 B0 C0 D0 # ----- ------ ------",
"0.63 # 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52",
"# 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05 #",
"-0.419 -1.413 -0.51 0.64 -1.25 # 0.58 -0.516 -1.598 -1.346",
"-2.09 # 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63",
"------ ----- ----- ----- # -0.09 0.944 0.160 0.271 -0.351",
"0.63 # 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42",
"-0.722 -0.79 -0.91 -2.09 # 1.42 -0.409 -0.525 2.155 -0.841",
"1.535 -0.633 -1.78 0.32 1.27 # 0.17 0.657 0.970 -0.419",
"------ ------ ------ ----- ----- ----- # -0.09 0.944 0.160",
"-2.32 -0.86 # -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32",
"0.124 -0.170 -0.722 -0.79 -0.91 -2.09 # 1.42 -0.409 -0.525",
"-1.075 0.017 0.167 0.54 0.52 1.42 # -0.53 0.777 -1.090",
"0.24 -0.56 1.45 # 0.34 -0.456 -1.220 -0.456 -0.315 1.10",
"-0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63 # 0.94 1.192",
"0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25 # 0.58",
"----- ----- ----- # -0.09 0.944 0.160 0.271 -0.351 1.66",
"-0.633 -1.78 0.32 1.27 # 0.17 0.657 0.970 -0.419 -1.413",
"C0 D0 # ----- ------ ------ ------ ------ ----- -----",
"1.45 # 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05",
"0.32 1.27 # 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64",
"0.41 -0.52 # 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91",
"-0.009 -0.47 0.41 -0.52 # 0.49 0.170 0.124 -0.170 -0.722",
"B C D A0 B0 C0 D0 # ----- ------",
"# -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27 #",
"-0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45 # 0.34",
"-1.090 -2.237 -0.693 0.24 -0.56 1.45 # 0.34 -0.456 -1.220",
"-0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27 # 0.17",
"-0.91 -2.09 # 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13",
"-0.170 -0.722 -0.79 -0.91 -2.09 # 1.42 -0.409 -0.525 2.155",
"# 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63 #",
"-0.51 0.64 -1.25 # 0.58 -0.516 -1.598 -1.346 0.711 1.09",
"-0.841 -0.19 0.13 0.63 # 0.94 1.192 -1.075 0.017 0.167",
"# 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09 #",
"0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45 # 0.34 -0.456",
"-1.346 0.711 1.09 0.05 0.63 # 1.04 -0.281 -0.411 0.959",
"1.09 0.05 0.63 # 1.04 -0.281 -0.411 0.959 -0.009 -0.47",
"1.10 1.38 -0.05 # # [100 rows x 8 columns]",
"-0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05 # # [100",
"0.669 0.664 1.535 -0.633 -1.78 0.32 1.27 # 0.17 0.657",
"-0.19 0.13 0.63 # 0.94 1.192 -1.075 0.017 0.167 0.54",
"1.66 -2.32 -0.86 # -0.95 0.669 0.664 1.535 -0.633 -1.78",
"# 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52 #",
"0.711 1.09 0.05 0.63 # 1.04 -0.281 -0.411 0.959 -0.009",
"1.192 -1.075 0.017 0.167 0.54 0.52 1.42 # -0.53 0.777",
"1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52 # 0.49",
"-0.411 0.959 -0.009 -0.47 0.41 -0.52 # 0.49 0.170 0.124",
"0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05 # #",
"B0 C0 D0 # ----- ------ ------ ------ ------ -----",
"# 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63 #",
"-0.47 0.41 -0.52 # 0.49 0.170 0.124 -0.170 -0.722 -0.79",
"1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63 # 0.94",
"-0.315 1.10 1.38 -0.05 # # [100 rows x 8",
"# 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42 #",
"------ ------ ----- ----- ----- # -0.09 0.944 0.160 0.271",
"0.54 0.52 1.42 # -0.53 0.777 -1.090 -2.237 -0.693 0.24",
"0.970 -0.419 -1.413 -0.51 0.64 -1.25 # 0.58 -0.516 -1.598",
"1.42 # -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45",
"-0.56 1.45 # 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38",
"2.155 -0.841 -0.19 0.13 0.63 # 0.94 1.192 -1.075 0.017",
"<filename>h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py df8.cbind(df9) # A B C D A0 B0 C0",
"------ ------ ------ ------ ----- ----- ----- # -0.09 0.944",
"-1.413 -0.51 0.64 -1.25 # 0.58 -0.516 -1.598 -1.346 0.711",
"df8.cbind(df9) # A B C D A0 B0 C0 D0",
"-0.79 -0.91 -2.09 # 1.42 -0.409 -0.525 2.155 -0.841 -0.19",
"0.05 0.63 # 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41",
"# A B C D A0 B0 C0 D0 #",
"0.64 -1.25 # 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05",
"1.27 # 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25",
"0.017 0.167 0.54 0.52 1.42 # -0.53 0.777 -1.090 -2.237",
"-1.220 -0.456 -0.315 1.10 1.38 -0.05 # # [100 rows",
"0.160 0.271 -0.351 1.66 -2.32 -0.86 # -0.95 0.669 0.664",
"A0 B0 C0 D0 # ----- ------ ------ ------ ------",
"# ----- ------ ------ ------ ------ ----- ----- ----- #"
] |
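For context, df8 and df9 are two 100-row, 4-column frames that cbind() joins side by side. A hedged sketch of how comparable frames could be built before the call above (random data and column names are assumptions, and an H2O cluster must be running):

import h2o
import numpy as np

h2o.init()
# Two random 100x4 frames whose column names match the printed output above.
df8 = h2o.H2OFrame(np.random.randn(100, 4).tolist(), column_names=["A", "B", "C", "D"])
df9 = h2o.H2OFrame(np.random.randn(100, 4).tolist(), column_names=["A0", "B0", "C0", "D0"])
combined = df8.cbind(df9)
print(combined.dim)  # [100, 8]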
[
"#!/usr/bin/env python3 # -*- encoding: utf-8 -*- from collections import",
"for key, value in mapping.items(): if iskeyword(key): key += '_'",
"= dict(mapping) self._data[key] = value def __getattr__(self, name): if hasattr(self._data,",
"for item in obj] else: return obj def __new__(cls, arg):",
"abc from keyword import iskeyword class FronzenJSON: def __init__(self, mapping):",
"name): if hasattr(self._data, name): return getattr(self._data, name) else: # return",
"name) else: # return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod def build(cls,",
"key, value in mapping.items(): if iskeyword(key): key += '_' #",
"elif isinstance(arg, abc.MutableSequence): return [cls[item] for item in arg] else:",
"collections import abc from keyword import iskeyword class FronzenJSON: def",
"return obj def __new__(cls, arg): if isinstance(arg, abc.Mapping): return super().__new__(cls)",
"elif isinstance(obj, abc.MutableMapping): return [cls.build(item) for item in obj] else:",
"iskeyword(key): key += '_' # self._data = dict(mapping) self._data[key] =",
"in obj] else: return obj def __new__(cls, arg): if isinstance(arg,",
"isinstance(arg, abc.Mapping): return super().__new__(cls) elif isinstance(arg, abc.MutableSequence): return [cls[item] for",
"-*- from collections import abc from keyword import iskeyword class",
"item in obj] else: return obj def __new__(cls, arg): if",
"isinstance(obj, abc.MutableMapping): return [cls.build(item) for item in obj] else: return",
"FronzenJSON(self._data[name]) @classmethod def build(cls, obj): if isinstance(obj, abc.Mapping): return cls(obj)",
"def __getattr__(self, name): if hasattr(self._data, name): return getattr(self._data, name) else:",
"name): return getattr(self._data, name) else: # return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name])",
"'_' # self._data = dict(mapping) self._data[key] = value def __getattr__(self,",
"= {} for key, value in mapping.items(): if iskeyword(key): key",
"@classmethod def build(cls, obj): if isinstance(obj, abc.Mapping): return cls(obj) elif",
"+= '_' # self._data = dict(mapping) self._data[key] = value def",
"import iskeyword class FronzenJSON: def __init__(self, mapping): self._data = {}",
"self._data = {} for key, value in mapping.items(): if iskeyword(key):",
"self._data[key] = value def __getattr__(self, name): if hasattr(self._data, name): return",
"else: return obj def __new__(cls, arg): if isinstance(arg, abc.Mapping): return",
"if hasattr(self._data, name): return getattr(self._data, name) else: # return FronzenJSON.build(self._data[name])",
"self._data = dict(mapping) self._data[key] = value def __getattr__(self, name): if",
"isinstance(obj, abc.Mapping): return cls(obj) elif isinstance(obj, abc.MutableMapping): return [cls.build(item) for",
"if isinstance(obj, abc.Mapping): return cls(obj) elif isinstance(obj, abc.MutableMapping): return [cls.build(item)",
"if iskeyword(key): key += '_' # self._data = dict(mapping) self._data[key]",
"return FronzenJSON(self._data[name]) @classmethod def build(cls, obj): if isinstance(obj, abc.Mapping): return",
"# -*- encoding: utf-8 -*- from collections import abc from",
"getattr(self._data, name) else: # return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod def",
"return getattr(self._data, name) else: # return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod",
"if isinstance(arg, abc.Mapping): return super().__new__(cls) elif isinstance(arg, abc.MutableSequence): return [cls[item]",
"isinstance(arg, abc.MutableSequence): return [cls[item] for item in arg] else: return",
"def __new__(cls, arg): if isinstance(arg, abc.Mapping): return super().__new__(cls) elif isinstance(arg,",
"obj] else: return obj def __new__(cls, arg): if isinstance(arg, abc.Mapping):",
"FronzenJSON: def __init__(self, mapping): self._data = {} for key, value",
"encoding: utf-8 -*- from collections import abc from keyword import",
"from keyword import iskeyword class FronzenJSON: def __init__(self, mapping): self._data",
"abc.MutableSequence): return [cls[item] for item in arg] else: return arg",
"build(cls, obj): if isinstance(obj, abc.Mapping): return cls(obj) elif isinstance(obj, abc.MutableMapping):",
"{} for key, value in mapping.items(): if iskeyword(key): key +=",
"FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod def build(cls, obj): if isinstance(obj, abc.Mapping):",
"abc.Mapping): return super().__new__(cls) elif isinstance(arg, abc.MutableSequence): return [cls[item] for item",
"= value def __getattr__(self, name): if hasattr(self._data, name): return getattr(self._data,",
"hasattr(self._data, name): return getattr(self._data, name) else: # return FronzenJSON.build(self._data[name]) return",
"return cls(obj) elif isinstance(obj, abc.MutableMapping): return [cls.build(item) for item in",
"key += '_' # self._data = dict(mapping) self._data[key] = value",
"python3 # -*- encoding: utf-8 -*- from collections import abc",
"def build(cls, obj): if isinstance(obj, abc.Mapping): return cls(obj) elif isinstance(obj,",
"<gh_stars>0 #!/usr/bin/env python3 # -*- encoding: utf-8 -*- from collections",
"return super().__new__(cls) elif isinstance(arg, abc.MutableSequence): return [cls[item] for item in",
"from collections import abc from keyword import iskeyword class FronzenJSON:",
"keyword import iskeyword class FronzenJSON: def __init__(self, mapping): self._data =",
"abc.Mapping): return cls(obj) elif isinstance(obj, abc.MutableMapping): return [cls.build(item) for item",
"abc.MutableMapping): return [cls.build(item) for item in obj] else: return obj",
"def __init__(self, mapping): self._data = {} for key, value in",
"class FronzenJSON: def __init__(self, mapping): self._data = {} for key,",
"iskeyword class FronzenJSON: def __init__(self, mapping): self._data = {} for",
"super().__new__(cls) elif isinstance(arg, abc.MutableSequence): return [cls[item] for item in arg]",
"arg): if isinstance(arg, abc.Mapping): return super().__new__(cls) elif isinstance(arg, abc.MutableSequence): return",
"mapping.items(): if iskeyword(key): key += '_' # self._data = dict(mapping)",
"__init__(self, mapping): self._data = {} for key, value in mapping.items():",
"obj def __new__(cls, arg): if isinstance(arg, abc.Mapping): return super().__new__(cls) elif",
"import abc from keyword import iskeyword class FronzenJSON: def __init__(self,",
"obj): if isinstance(obj, abc.Mapping): return cls(obj) elif isinstance(obj, abc.MutableMapping): return",
"in mapping.items(): if iskeyword(key): key += '_' # self._data =",
"return [cls.build(item) for item in obj] else: return obj def",
"-*- encoding: utf-8 -*- from collections import abc from keyword",
"cls(obj) elif isinstance(obj, abc.MutableMapping): return [cls.build(item) for item in obj]",
"else: # return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod def build(cls, obj):",
"value in mapping.items(): if iskeyword(key): key += '_' # self._data",
"[cls.build(item) for item in obj] else: return obj def __new__(cls,",
"__new__(cls, arg): if isinstance(arg, abc.Mapping): return super().__new__(cls) elif isinstance(arg, abc.MutableSequence):",
"# self._data = dict(mapping) self._data[key] = value def __getattr__(self, name):",
"utf-8 -*- from collections import abc from keyword import iskeyword",
"dict(mapping) self._data[key] = value def __getattr__(self, name): if hasattr(self._data, name):",
"__getattr__(self, name): if hasattr(self._data, name): return getattr(self._data, name) else: #",
"return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod def build(cls, obj): if isinstance(obj,",
"mapping): self._data = {} for key, value in mapping.items(): if",
"# return FronzenJSON.build(self._data[name]) return FronzenJSON(self._data[name]) @classmethod def build(cls, obj): if",
"value def __getattr__(self, name): if hasattr(self._data, name): return getattr(self._data, name)"
] |
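A short usage sketch of the class above (the sample data is invented for illustration):

raw = {"name": "alice", "class": "wizard", "stats": {"hp": 10, "mp": 4}}
obj = FronzenJSON(raw)
print(obj.class_)    # 'wizard': 'class' is a keyword, so it was stored as 'class_'
print(obj.stats.hp)  # 10: nested mappings are wrapped on attribute access
print(obj.name)      # 'alice'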
[
"argmax=False): # Robot motion next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map,",
"= self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions)",
"target's movement depends on the robot; With Pr=0.8 the target",
"state.robot_position, action) # If Tag action if isinstance(action, TagAction): if",
"return constants.EPSILON else: if next_state.target_found: return constants.EPSILON else: return 1.0",
"probability(self, next_state, state, action, **kwargs): # Robot motion expected_robot_position =",
"for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves deterministically.",
"position, action): if isinstance(action, MotionAction): dx, dy = action.motion next_position",
"state, action, **kwargs): # Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position,",
"def sample(self, state, action, argmax=False): # Robot motion next_state =",
"self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return",
"next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag action if",
"= action.motion next_position = (position[0] + dx, position[1] + dy)",
"= target_motion_policy @classmethod def if_move_by(cls, grid_map, position, action): if isinstance(action,",
"Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position !=",
"dy = action.motion next_position = (position[0] + dx, position[1] +",
"TagAction): if next_state.target_position == next_state.robot_position: if next_state.target_found: return 1.0 -",
"next_position = (position[0] + dx, position[1] + dy) if grid_map.valid_pose(next_position):",
"motion next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) #",
"def probability(self, next_state, state, action, **kwargs): # Robot motion expected_robot_position",
"action, argmax=False): # Robot motion next_state = copy.deepcopy(state) next_state.robot_position =",
"state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state",
"= copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag",
"not state.target_found: if state.robot_position == state.target_position: next_state.target_found = True return",
"valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position,",
"target stays at the same place. The target never moves",
"as util import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import *",
"state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state, action, argmax=False): # Robot",
"= (position[0] + dx, position[1] + dy) if grid_map.valid_pose(next_position): return",
"action) if expected_robot_position != next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction):",
"next_state.robot_position: if next_state.target_found: return 1.0 - constants.EPSILON else: return constants.EPSILON",
"grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy = target_motion_policy @classmethod def",
"never moves closer to the robot. \"\"\" import copy import",
"1.0 - constants.EPSILON else: return constants.EPSILON else: if next_state.target_found: return",
"- constants.EPSILON # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position,",
"Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves",
"Pr=0.8 the target moves away from the robot, and with",
"the same place. The target never moves closer to the",
"if next_state.target_position == next_state.robot_position: if next_state.target_found: return 1.0 - constants.EPSILON",
"if expected_robot_position != next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction): if",
"return 1.0 - constants.EPSILON else: return constants.EPSILON else: if next_state.target_found:",
"target moves away from the robot, and with Pr=0.2, the",
"= grid_map self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls, grid_map, position,",
"constants.EPSILON else: return constants.EPSILON else: if next_state.target_found: return constants.EPSILON else:",
"same place. The target never moves closer to the robot.",
"Robot motion next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action)",
"motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position,",
"state, action, argmax=False): # Robot motion next_state = copy.deepcopy(state) next_state.robot_position",
"valid_target_motion_actions) return next_state def argmax(self, state, action, **kwargs): return self.sample(state,",
"TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy =",
"as constants from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self,",
"!= next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction): if next_state.target_position ==",
"expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position != next_state.robot_position: return",
"moves deterministically. The target's movement depends on the robot; With",
"else: if next_state.target_found: return constants.EPSILON else: return 1.0 - constants.EPSILON",
"with Pr=0.2, the target stays at the same place. The",
"<https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves deterministically. The target's movement",
"Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position =",
"= True return next_state # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)",
"valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def",
"to the paper `Anytime Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_.",
"return constants.EPSILON else: return 1.0 - constants.EPSILON # Target motion",
"pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map",
"Implemented according to the paper `Anytime Point-Based Approximations for Large",
"# If Tag action if isinstance(action, TagAction): if not state.target_found:",
"problem. Implemented according to the paper `Anytime Point-Based Approximations for",
"== next_state.robot_position: if next_state.target_found: return 1.0 - constants.EPSILON else: return",
"POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves deterministically. The target's",
"# Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position",
"`Anytime Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the",
"= self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions)",
"from the robot, and with Pr=0.2, the target stays at",
"= TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position != next_state.robot_position: return constants.EPSILON",
"self._grid_map = grid_map self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls, grid_map,",
"class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy",
"(position[0] + dx, position[1] + dy) if grid_map.valid_pose(next_position): return next_position",
"position def probability(self, next_state, state, action, **kwargs): # Robot motion",
"moves away from the robot, and with Pr=0.2, the target",
"argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position,",
"**kwargs): # Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if",
"to the robot. \"\"\" import copy import pomdp_py import pomdp_problems.util",
"next_state.target_found: return constants.EPSILON else: return 1.0 - constants.EPSILON # Target",
"Tag problem. Implemented according to the paper `Anytime Point-Based Approximations",
"if isinstance(action, TagAction): if not state.target_found: if state.robot_position == state.target_position:",
"the target stays at the same place. The target never",
"dx, dy = action.motion next_position = (position[0] + dx, position[1]",
"not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position =",
"self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls, grid_map, position, action): if",
"if next_state.target_found: return constants.EPSILON else: return 1.0 - constants.EPSILON #",
"stays at the same place. The target never moves closer",
"target never moves closer to the robot. \"\"\" import copy",
"import copy import pomdp_py import pomdp_problems.util as util import pomdp_problems.tag.constants",
"next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If",
"valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self,",
"if not state.target_found: if state.robot_position == state.target_position: next_state.target_found = True",
"the paper `Anytime Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition",
"next_position return position def probability(self, next_state, state, action, **kwargs): #",
"if state.robot_position == state.target_position: next_state.target_found = True return next_state #",
"self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else:",
"next_state def argmax(self, state, action, **kwargs): return self.sample(state, action, argmax=True)",
"the robot, and with Pr=0.2, the target stays at the",
"\"\"\" import copy import pomdp_py import pomdp_problems.util as util import",
"import pomdp_problems.util as util import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action",
"= self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state,",
"TagAction): if not state.target_found: if state.robot_position == state.target_position: next_state.target_found =",
"action) # If Tag action if isinstance(action, TagAction): if not",
"action if isinstance(action, TagAction): if not state.target_found: if state.robot_position ==",
"else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def argmax(self,",
"copy import pomdp_py import pomdp_problems.util as util import pomdp_problems.tag.constants as",
"If Tag action if isinstance(action, TagAction): if not state.target_found: if",
"isinstance(action, TagAction): if next_state.target_position == next_state.robot_position: if next_state.target_found: return 1.0",
"constants.EPSILON if isinstance(action, TagAction): if next_state.target_position == next_state.robot_position: if next_state.target_found:",
"grid_map.valid_pose(next_position): return next_position return position def probability(self, next_state, state, action,",
"import pomdp_py import pomdp_problems.util as util import pomdp_problems.tag.constants as constants",
"if_move_by(cls, grid_map, position, action): if isinstance(action, MotionAction): dx, dy =",
"isinstance(action, TagAction): if not state.target_found: if state.robot_position == state.target_position: next_state.target_found",
"import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map =",
"state.target_position: next_state.target_found = True return next_state # Target motion valid_target_motion_actions",
"next_state.target_found = True return next_state # Target motion valid_target_motion_actions =",
"motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def",
"return next_position return position def probability(self, next_state, state, action, **kwargs):",
"+ dy) if grid_map.valid_pose(next_position): return next_position return position def probability(self,",
"and with Pr=0.2, the target stays at the same place.",
"self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state, action, argmax=False): #",
"Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot",
"= self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def argmax(self, state, action,",
"MotionAction): dx, dy = action.motion next_position = (position[0] + dx,",
"depends on the robot; With Pr=0.8 the target moves away",
"constants.EPSILON else: return 1.0 - constants.EPSILON # Target motion valid_target_motion_actions",
"== state.target_position: next_state.target_found = True return next_state # Target motion",
"state.robot_position == state.target_position: next_state.target_found = True return next_state # Target",
"if next_state.target_found: return 1.0 - constants.EPSILON else: return constants.EPSILON else:",
"Pr=0.2, the target stays at the same place. The target",
"according to the paper `Anytime Point-Based Approximations for Large POMDPs",
"away from the robot, and with Pr=0.2, the target stays",
"Transition model: the robot moves deterministically. The target's movement depends",
"The target's movement depends on the robot; With Pr=0.8 the",
"With Pr=0.8 the target moves away from the robot, and",
"state.robot_position, action) if expected_robot_position != next_state.robot_position: return constants.EPSILON if isinstance(action,",
"self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def argmax(self, state, action, **kwargs):",
"\"\"\"The Tag problem. Implemented according to the paper `Anytime Point-Based",
"the target moves away from the robot, and with Pr=0.2,",
"dx, position[1] + dy) if grid_map.valid_pose(next_position): return next_position return position",
"else: return constants.EPSILON else: if next_state.target_found: return constants.EPSILON else: return",
"if not argmax: next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position",
"constants from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map,",
"motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position != next_state.robot_position:",
"TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag action if isinstance(action, TagAction):",
"def if_move_by(cls, grid_map, position, action): if isinstance(action, MotionAction): dx, dy",
"grid_map self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls, grid_map, position, action):",
"model: the robot moves deterministically. The target's movement depends on",
"if isinstance(action, MotionAction): dx, dy = action.motion next_position = (position[0]",
"the robot. \"\"\" import copy import pomdp_py import pomdp_problems.util as",
"else: return 1.0 - constants.EPSILON # Target motion valid_target_motion_actions =",
"# Robot motion next_state = copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position,",
"the robot moves deterministically. The target's movement depends on the",
"grid_map, position, action): if isinstance(action, MotionAction): dx, dy = action.motion",
"dy) if grid_map.valid_pose(next_position): return next_position return position def probability(self, next_state,",
"- constants.EPSILON else: return constants.EPSILON else: if next_state.target_found: return constants.EPSILON",
"state.target_found: if state.robot_position == state.target_position: next_state.target_found = True return next_state",
"+ dx, position[1] + dy) if grid_map.valid_pose(next_position): return next_position return",
"The target never moves closer to the robot. \"\"\" import",
"isinstance(action, MotionAction): dx, dy = action.motion next_position = (position[0] +",
"return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state, action, argmax=False):",
"position[1] + dy) if grid_map.valid_pose(next_position): return next_position return position def",
"return position def probability(self, next_state, state, action, **kwargs): # Robot",
"moves closer to the robot. \"\"\" import copy import pomdp_py",
"<reponame>Semanti1/pomdp_findit \"\"\"The Tag problem. Implemented according to the paper `Anytime",
"Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model: the robot moves deterministically. The",
"copy.deepcopy(state) next_state.robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag action",
"action, **kwargs): # Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action)",
"constants.EPSILON else: if next_state.target_found: return constants.EPSILON else: return 1.0 -",
"TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) if expected_robot_position != next_state.robot_position: return constants.EPSILON if",
"movement depends on the robot; With Pr=0.8 the target moves",
"return constants.EPSILON if isinstance(action, TagAction): if next_state.target_position == next_state.robot_position: if",
"next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction): if next_state.target_position == next_state.robot_position:",
"pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def",
"Tag action if isinstance(action, TagAction): if not state.target_found: if state.robot_position",
"state.target_position, valid_target_motion_actions) return next_state def argmax(self, state, action, **kwargs): return",
"the robot; With Pr=0.8 the target moves away from the",
"return next_state # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not",
"if grid_map.valid_pose(next_position): return next_position return position def probability(self, next_state, state,",
"on the robot; With Pr=0.8 the target moves away from",
"robot, and with Pr=0.2, the target stays at the same",
"pomdp_py import pomdp_problems.util as util import pomdp_problems.tag.constants as constants from",
"expected_robot_position != next_state.robot_position: return constants.EPSILON if isinstance(action, TagAction): if next_state.target_position",
"constants.EPSILON # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position,",
"next_state.target_position == next_state.robot_position: if next_state.target_found: return 1.0 - constants.EPSILON else:",
"next_state, state, action, **kwargs): # Robot motion expected_robot_position = TagTransitionModel.if_move_by(self._grid_map,",
"@classmethod def if_move_by(cls, grid_map, position, action): if isinstance(action, MotionAction): dx,",
"return next_state def argmax(self, state, action, **kwargs): return self.sample(state, action,",
"= TagTransitionModel.if_move_by(self._grid_map, state.robot_position, action) # If Tag action if isinstance(action,",
"from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy):",
"__init__(self, grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy = target_motion_policy @classmethod",
"state.robot_position, valid_target_motion_actions) def sample(self, state, action, argmax=False): # Robot motion",
"target_motion_policy @classmethod def if_move_by(cls, grid_map, position, action): if isinstance(action, MotionAction):",
"place. The target never moves closer to the robot. \"\"\"",
"action): if isinstance(action, MotionAction): dx, dy = action.motion next_position =",
"return 1.0 - constants.EPSILON # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position)",
"pomdp_problems.util as util import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import",
"import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import * class TagTransitionModel(pomdp_py.TransitionModel):",
"* class TagTransitionModel(pomdp_py.TransitionModel): def __init__(self, grid_map, target_motion_policy): self._grid_map = grid_map",
"next_state # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax:",
"def __init__(self, grid_map, target_motion_policy): self._grid_map = grid_map self.target_motion_policy = target_motion_policy",
"next_state.target_position = self.target_motion_policy.random(state.robot_position, state.target_position, valid_target_motion_actions) else: next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position,",
"robot; With Pr=0.8 the target moves away from the robot,",
"next_state.target_found: return 1.0 - constants.EPSILON else: return constants.EPSILON else: if",
"self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions) def sample(self, state, action,",
"next_state.target_position = self.target_motion_policy.mpe(state.robot_position, state.target_position, valid_target_motion_actions) return next_state def argmax(self, state,",
"True return next_state # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if",
"closer to the robot. \"\"\" import copy import pomdp_py import",
"# Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) if not argmax: next_state.target_position",
"util import pomdp_problems.tag.constants as constants from pomdp_problems.tag.domain.action import * class",
"action.motion next_position = (position[0] + dx, position[1] + dy) if",
"at the same place. The target never moves closer to",
"deterministically. The target's movement depends on the robot; With Pr=0.8",
"paper `Anytime Point-Based Approximations for Large POMDPs <https://arxiv.org/pdf/1110.0027.pdf>`_. Transition model:",
"if isinstance(action, TagAction): if next_state.target_position == next_state.robot_position: if next_state.target_found: return",
"Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position, valid_target_motion_actions)",
"robot moves deterministically. The target's movement depends on the robot;",
"valid_target_motion_actions) def sample(self, state, action, argmax=False): # Robot motion next_state",
"target_motion_policy): self._grid_map = grid_map self.target_motion_policy = target_motion_policy @classmethod def if_move_by(cls,",
"sample(self, state, action, argmax=False): # Robot motion next_state = copy.deepcopy(state)",
"robot. \"\"\" import copy import pomdp_py import pomdp_problems.util as util",
"1.0 - constants.EPSILON # Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return",
"# Target motion valid_target_motion_actions = self._grid_map.valid_motions(state.target_position) return self.target_motion_policy.probability(next_state.target_position, state.target_position, state.robot_position,"
] |
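The deterministic robot step in if_move_by above reduces to "offset the position by the action's motion vector, and keep the old pose if the new one is off the map". A stand-alone sketch of that rule with a stubbed 5x5 grid (the real MotionAction and grid map come from pomdp_problems.tag and are not used here):

def move_if_valid(position, motion, valid_pose):
    # mirror of the arithmetic in TagTransitionModel.if_move_by
    dx, dy = motion
    next_position = (position[0] + dx, position[1] + dy)
    return next_position if valid_pose(next_position) else position

def inside_5x5(pose):
    return 0 <= pose[0] < 5 and 0 <= pose[1] < 5

print(move_if_valid((2, 3), (1, 0), inside_5x5))  # (3, 3)
print(move_if_valid((4, 4), (1, 0), inside_5x5))  # (4, 4): the move would leave the grid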
[
"up or \" \"Kerberos ticket being active.\" ) commands.run_command(cmd=cmd, error_message=error_msg)",
"\"new-sources\", sources], cwd=self.directory, error_message=\"Adding new sources failed:\", fail=fail, ) def",
"LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A",
"package_name: str, target_path: str, anonymous: bool = False): \"\"\" clone",
"cwd=self.directory, error_message=\"Submission of build to koji failed.\", fail=True, ) except",
") def build( self, scratch: bool = False, nowait: bool",
"[package_name, target_path] error_msg = ( f\"Packit failed to clone the",
"<gh_stars>0 # MIT License # # Copyright (c) 2019 Red",
"koji_target: koji target to build in (`koji list-targets`) :param srpm_path:",
"TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR",
"keys set up or \" \"Kerberos ticket being active.\" )",
"rights # to use, copy, modify, merge, publish, distribute, sublicense,",
"the code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__( self,",
"# fail on the fedpkg side, the build is triggered",
"cmd += [package_name, target_path] error_msg = ( f\"Packit failed to",
"dist-git repo; this has to be done in current env",
"portions of the Software. # # THE SOFTWARE IS PROVIDED",
"else \"fedpkg\" def __repr__(self): return ( \"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}',",
"the build to finish :param koji_target: koji target to build",
"koji target to build in (`koji list-targets`) :param srpm_path: use",
"not? :param nowait: False == wait for the build to",
"logger.info( \"The 'fedpkg build' command crashed which is a known",
"# # The above copyright notice and this permission notice",
"packit.exceptions import PackitCommandFailedError from packit.utils import commands # so we",
"set up or \" \"Kerberos ticket being active.\" ) commands.run_command(cmd=cmd,",
"error_message=\"Adding new sources failed:\", fail=fail, ) def build( self, scratch:",
"self.directory = directory self.stage = stage self.fedpkg_exec = \"fedpkg-stage\" if",
"selected SRPM for build, not dist-git repo & ref :return:",
"bool = False): \"\"\" clone a dist-git repo; this has",
"to build in (`koji list-targets`) :param srpm_path: use selected SRPM",
"stage: bool = False ): self.fas_username = fas_username self.directory =",
"+= [package_name, target_path] error_msg = ( f\"Packit failed to clone",
"cmd.append(\"--nowait\") if koji_target: cmd += [\"--target\", koji_target] if srpm_path: cmd",
"is a known issue: \" \"the build is submitted in",
"is submitted in koji anyway.\" ) logger.debug(ex.stdout_output) else: raise def",
"don't have the keytab in sandbox \"\"\" cmd = [self.fedpkg_exec]",
"and associated documentation files (the \"Software\"), to deal # in",
"Software without restriction, including without limitation the rights # to",
"and to permit persons to whom the Software is #",
"copies of the Software, and to permit persons to whom",
"hereby granted, free of charge, to any person obtaining a",
"this permission notice shall be included in all # copies",
"OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE",
"distribute, sublicense, and/or sell # copies of the Software, and",
"None, stage: bool = False ): self.fas_username = fas_username self.directory",
"OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.",
") except PackitCommandFailedError as ex: # fail on the fedpkg",
"f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" ) def new_sources(self, sources=\"\", fail=True):",
"HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #",
"the fedpkg side, the build is triggered if ( \"watch_tasks()",
"typing import Optional from packit.exceptions import PackitCommandFailedError from packit.utils import",
"fas_username self.directory = directory self.stage = stage self.fedpkg_exec = \"fedpkg-stage\"",
"+= [\"--target\", koji_target] if srpm_path: cmd += [\"--srpm\", str(srpm_path)] try:",
"[self.fedpkg_exec] if self.fas_username: cmd += [\"--user\", self.fas_username] cmd += [\"-q\",",
"\"fedpkg\" def __repr__(self): return ( \"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \"",
"cmd += [\"--srpm\", str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission of",
"[\"--srpm\", str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission of build to",
"Inc. # Permission is hereby granted, free of charge, to",
"import Path from typing import Optional from packit.exceptions import PackitCommandFailedError",
"fail on the fedpkg side, the build is triggered if",
"OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH",
"deal # in the Software without restriction, including without limitation",
"use, copy, modify, merge, publish, distribute, sublicense, and/or sell #",
"= [self.fedpkg_exec, \"build\"] if scratch: cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\") if",
"be included in all # copies or substantial portions of",
"in koji :param scratch: scratch (temporary) build or not? :param",
"self.fedpkg_exec = \"fedpkg-stage\" if stage else \"fedpkg\" def __repr__(self): return",
"the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",",
"{package_name}; \" \"please make sure that you are authorized to",
"repo; this has to be done in current env b/c",
"copy, modify, merge, publish, distribute, sublicense, and/or sell # copies",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR",
"import Optional from packit.exceptions import PackitCommandFailedError from packit.utils import commands",
"f\"Packit failed to clone the repository {package_name}; \" \"please make",
"software and associated documentation files (the \"Software\"), to deal #",
"from packit.utils import commands # so we can mock utils",
"of build to koji failed.\", fail=True, ) except PackitCommandFailedError as",
"clone repositories \" \"from Fedora dist-git - this may require",
"+= [\"-q\", \"clone\"] if anonymous: cmd += [\"-a\"] cmd +=",
"from pathlib import Path from typing import Optional from packit.exceptions",
"None, srpm_path: Optional[Path] = None, ): \"\"\" build in koji",
"# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF",
"= False ): self.fas_username = fas_username self.directory = directory self.stage",
"AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR",
"the Software without restriction, including without limitation the rights #",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF",
"\" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" ) def new_sources(self, sources=\"\", fail=True): if",
"unexpected keyword argument 'ki_handler'\" in ex.stderr_output ): logger.info( \"The 'fedpkg",
"\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #",
"raise Exception(\"Cannot access fedpkg repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources],",
"koji anyway.\" ) logger.debug(ex.stdout_output) else: raise def clone(self, package_name: str,",
"str = None, stage: bool = False ): self.fas_username =",
"(`koji list-targets`) :param srpm_path: use selected SRPM for build, not",
"stage else \"fedpkg\" def __repr__(self): return ( \"FedPKG(\" f\"fas_username='{self.fas_username}', \"",
"anonymous: bool = False): \"\"\" clone a dist-git repo; this",
"repositories \" \"from Fedora dist-git - this may require SSH",
"str, anonymous: bool = False): \"\"\" clone a dist-git repo;",
"failed.\", fail=True, ) except PackitCommandFailedError as ex: # fail on",
"included in all # copies or substantial portions of the",
"# of this software and associated documentation files (the \"Software\"),",
"furnished to do so, subject to the following conditions: #",
"to do so, subject to the following conditions: # #",
"\"clone\"] if anonymous: cmd += [\"-a\"] cmd += [package_name, target_path]",
"# The above copyright notice and this permission notice shall",
"SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR",
"not Path(self.directory).is_dir(): raise Exception(\"Cannot access fedpkg repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec,",
"a copy # of this software and associated documentation files",
"OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF",
"authorized to clone repositories \" \"from Fedora dist-git - this",
"# so we can mock utils from packit.utils.logging import logger",
"command crashed which is a known issue: \" \"the build",
"[\"-q\", \"clone\"] if anonymous: cmd += [\"-a\"] cmd += [package_name,",
"failed:\", fail=fail, ) def build( self, scratch: bool = False,",
"permission notice shall be included in all # copies or",
"License # # Copyright (c) 2019 Red Hat, Inc. #",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO",
"scratch: cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\") if koji_target: cmd += [\"--target\",",
"Copyright (c) 2019 Red Hat, Inc. # Permission is hereby",
"= [self.fedpkg_exec] if self.fas_username: cmd += [\"--user\", self.fas_username] cmd +=",
"a dist-git repo; this has to be done in current",
"Hat, Inc. # Permission is hereby granted, free of charge,",
"IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS",
"mock utils from packit.utils.logging import logger class FedPKG: \"\"\" Part",
"\"fedpkg-stage\" if stage else \"fedpkg\" def __repr__(self): return ( \"FedPKG(\"",
"NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE",
"following conditions: # # The above copyright notice and this",
"directory self.stage = stage self.fedpkg_exec = \"fedpkg-stage\" if stage else",
"nowait: cmd.append(\"--nowait\") if koji_target: cmd += [\"--target\", koji_target] if srpm_path:",
"in current env b/c we don't have the keytab in",
"to deal # in the Software without restriction, including without",
"sandbox \"\"\" cmd = [self.fedpkg_exec] if self.fas_username: cmd += [\"--user\",",
"False, koji_target: Optional[str] = None, srpm_path: Optional[Path] = None, ):",
"conditions: # # The above copyright notice and this permission",
"SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #",
"to use, copy, modify, merge, publish, distribute, sublicense, and/or sell",
"): logger.info( \"The 'fedpkg build' command crashed which is a",
"IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS",
"False ): self.fas_username = fas_username self.directory = directory self.stage =",
"def __repr__(self): return ( \"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\"",
"FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN",
"Path from typing import Optional from packit.exceptions import PackitCommandFailedError from",
"packit.utils import commands # so we can mock utils from",
"submitted in koji anyway.\" ) logger.debug(ex.stdout_output) else: raise def clone(self,",
"make sure that you are authorized to clone repositories \"",
"sources=\"\", fail=True): if not Path(self.directory).is_dir(): raise Exception(\"Cannot access fedpkg repository:\")",
"bool = False, koji_target: Optional[str] = None, srpm_path: Optional[Path] =",
"cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\") if koji_target: cmd += [\"--target\", koji_target]",
"Exception(\"Cannot access fedpkg repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory,",
"[\"--target\", koji_target] if srpm_path: cmd += [\"--srpm\", str(srpm_path)] try: commands.run_command_remote(",
"keytab in sandbox \"\"\" cmd = [self.fedpkg_exec] if self.fas_username: cmd",
"and/or sell # copies of the Software, and to permit",
") def new_sources(self, sources=\"\", fail=True): if not Path(self.directory).is_dir(): raise Exception(\"Cannot",
"the rights # to use, copy, modify, merge, publish, distribute,",
"scratch: bool = False, nowait: bool = False, koji_target: Optional[str]",
"all # copies or substantial portions of the Software. #",
"SOFTWARE. from pathlib import Path from typing import Optional from",
"bool = False ): self.fas_username = fas_username self.directory = directory",
":param scratch: scratch (temporary) build or not? :param nowait: False",
"ex.stderr_output ): logger.info( \"The 'fedpkg build' command crashed which is",
"notice and this permission notice shall be included in all",
"is hereby granted, free of charge, to any person obtaining",
"repo & ref :return: \"\"\" cmd = [self.fedpkg_exec, \"build\"] if",
"scratch (temporary) build or not? :param nowait: False == wait",
":param nowait: False == wait for the build to finish",
"if scratch: cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\") if koji_target: cmd +=",
"use selected SRPM for build, not dist-git repo & ref",
"False): \"\"\" clone a dist-git repo; this has to be",
"on the fedpkg side, the build is triggered if (",
"def __init__( self, fas_username: str = None, directory: str =",
"build to koji failed.\", fail=True, ) except PackitCommandFailedError as ex:",
"CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR",
"\"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" ) def new_sources(self, sources=\"\",",
"Optional[str] = None, srpm_path: Optional[Path] = None, ): \"\"\" build",
"person obtaining a copy # of this software and associated",
"without restriction, including without limitation the rights # to use,",
"subject to the following conditions: # # The above copyright",
"self.fas_username] cmd += [\"-q\", \"clone\"] if anonymous: cmd += [\"-a\"]",
"build is triggered if ( \"watch_tasks() got an unexpected keyword",
"commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory, error_message=\"Adding new sources failed:\", fail=fail,",
"error_message=\"Submission of build to koji failed.\", fail=True, ) except PackitCommandFailedError",
"koji_target: cmd += [\"--target\", koji_target] if srpm_path: cmd += [\"--srpm\",",
"stage self.fedpkg_exec = \"fedpkg-stage\" if stage else \"fedpkg\" def __repr__(self):",
"WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN",
"__repr__(self): return ( \"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" )",
"str, target_path: str, anonymous: bool = False): \"\"\" clone a",
":return: \"\"\" cmd = [self.fedpkg_exec, \"build\"] if scratch: cmd.append(\"--scratch\") if",
"commands # so we can mock utils from packit.utils.logging import",
"2019 Red Hat, Inc. # Permission is hereby granted, free",
"logger class FedPKG: \"\"\" Part of the code is from",
"THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE",
"import logger class FedPKG: \"\"\" Part of the code is",
"so we can mock utils from packit.utils.logging import logger class",
") logger.debug(ex.stdout_output) else: raise def clone(self, package_name: str, target_path: str,",
"or substantial portions of the Software. # # THE SOFTWARE",
"fail=True): if not Path(self.directory).is_dir(): raise Exception(\"Cannot access fedpkg repository:\") return",
"BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS",
"+= [\"-a\"] cmd += [package_name, target_path] error_msg = ( f\"Packit",
"None, directory: str = None, stage: bool = False ):",
"FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL",
"SRPM for build, not dist-git repo & ref :return: \"\"\"",
"nowait: bool = False, koji_target: Optional[str] = None, srpm_path: Optional[Path]",
"OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR",
"cmd=cmd, cwd=self.directory, error_message=\"Submission of build to koji failed.\", fail=True, )",
"cmd += [\"--target\", koji_target] if srpm_path: cmd += [\"--srpm\", str(srpm_path)]",
"\"\"\" clone a dist-git repo; this has to be done",
"commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission of build to koji failed.\", fail=True,",
"DEALINGS IN THE # SOFTWARE. from pathlib import Path from",
"CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS",
"IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER",
"def build( self, scratch: bool = False, nowait: bool =",
"CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION",
"None, ): \"\"\" build in koji :param scratch: scratch (temporary)",
"# Permission is hereby granted, free of charge, to any",
"of charge, to any person obtaining a copy # of",
"str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission of build to koji",
"fedpkg side, the build is triggered if ( \"watch_tasks() got",
"INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #",
"merge, publish, distribute, sublicense, and/or sell # copies of the",
"koji_target] if srpm_path: cmd += [\"--srpm\", str(srpm_path)] try: commands.run_command_remote( cmd=cmd,",
"fail=True, ) except PackitCommandFailedError as ex: # fail on the",
"# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY",
"list-targets`) :param srpm_path: use selected SRPM for build, not dist-git",
"scratch: scratch (temporary) build or not? :param nowait: False ==",
"NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT",
"triggered if ( \"watch_tasks() got an unexpected keyword argument 'ki_handler'\"",
"\"from Fedora dist-git - this may require SSH keys set",
"NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR",
"self.fas_username = fas_username self.directory = directory self.stage = stage self.fedpkg_exec",
"may require SSH keys set up or \" \"Kerberos ticket",
"build, not dist-git repo & ref :return: \"\"\" cmd =",
"wait for the build to finish :param koji_target: koji target",
"# Copyright (c) 2019 Red Hat, Inc. # Permission is",
"dist-git - this may require SSH keys set up or",
"PackitCommandFailedError as ex: # fail on the fedpkg side, the",
"'fedpkg build' command crashed which is a known issue: \"",
"packit.utils.logging import logger class FedPKG: \"\"\" Part of the code",
"this may require SSH keys set up or \" \"Kerberos",
"LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER",
"which is a known issue: \" \"the build is submitted",
"self.stage = stage self.fedpkg_exec = \"fedpkg-stage\" if stage else \"fedpkg\"",
"\"\"\" cmd = [self.fedpkg_exec, \"build\"] if scratch: cmd.append(\"--scratch\") if nowait:",
":param koji_target: koji target to build in (`koji list-targets`) :param",
"( \"watch_tasks() got an unexpected keyword argument 'ki_handler'\" in ex.stderr_output",
"f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" ) def new_sources(self, sources=\"\", fail=True): if not",
"so, subject to the following conditions: # # The above",
"__init__( self, fas_username: str = None, directory: str = None,",
"AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #",
"build' command crashed which is a known issue: \" \"the",
"= False, koji_target: Optional[str] = None, srpm_path: Optional[Path] = None,",
"( \"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" ) def new_sources(self,",
"DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF",
"is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__( self, fas_username: str",
"srpm_path: Optional[Path] = None, ): \"\"\" build in koji :param",
"srpm_path: cmd += [\"--srpm\", str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission",
"the following conditions: # # The above copyright notice and",
"for the build to finish :param koji_target: koji target to",
"FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE",
"str = None, directory: str = None, stage: bool =",
"cmd = [self.fedpkg_exec] if self.fas_username: cmd += [\"--user\", self.fas_username] cmd",
"FedPKG: \"\"\" Part of the code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py",
"except PackitCommandFailedError as ex: # fail on the fedpkg side,",
"sure that you are authorized to clone repositories \" \"from",
"srpm_path: use selected SRPM for build, not dist-git repo &",
"the Software, and to permit persons to whom the Software",
"directory: str = None, stage: bool = False ): self.fas_username",
"= False): \"\"\" clone a dist-git repo; this has to",
"build in koji :param scratch: scratch (temporary) build or not?",
"return ( \"FedPKG(\" f\"fas_username='{self.fas_username}', \" f\"directory='{self.directory}', \" f\"stage='{self.stage}')\" ) def",
"are authorized to clone repositories \" \"from Fedora dist-git -",
"build is submitted in koji anyway.\" ) logger.debug(ex.stdout_output) else: raise",
"\" f\"stage='{self.stage}')\" ) def new_sources(self, sources=\"\", fail=True): if not Path(self.directory).is_dir():",
"\"watch_tasks() got an unexpected keyword argument 'ki_handler'\" in ex.stderr_output ):",
"\"\"\" build in koji :param scratch: scratch (temporary) build or",
"build or not? :param nowait: False == wait for the",
"cmd = [self.fedpkg_exec, \"build\"] if scratch: cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\")",
"the keytab in sandbox \"\"\" cmd = [self.fedpkg_exec] if self.fas_username:",
"FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT",
"failed to clone the repository {package_name}; \" \"please make sure",
"persons to whom the Software is # furnished to do",
"has to be done in current env b/c we don't",
"USE OR OTHER DEALINGS IN THE # SOFTWARE. from pathlib",
"associated documentation files (the \"Software\"), to deal # in the",
"target to build in (`koji list-targets`) :param srpm_path: use selected",
"IN THE # SOFTWARE. from pathlib import Path from typing",
"MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN",
"Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT",
"clone(self, package_name: str, target_path: str, anonymous: bool = False): \"\"\"",
"to any person obtaining a copy # of this software",
"fedpkg repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory, error_message=\"Adding new",
"if anonymous: cmd += [\"-a\"] cmd += [package_name, target_path] error_msg",
"f\"stage='{self.stage}')\" ) def new_sources(self, sources=\"\", fail=True): if not Path(self.directory).is_dir(): raise",
"release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__( self, fas_username: str = None,",
"fail=fail, ) def build( self, scratch: bool = False, nowait:",
"have the keytab in sandbox \"\"\" cmd = [self.fedpkg_exec] if",
"we can mock utils from packit.utils.logging import logger class FedPKG:",
"build( self, scratch: bool = False, nowait: bool = False,",
"# # Copyright (c) 2019 Red Hat, Inc. # Permission",
"of the Software, and to permit persons to whom the",
"this software and associated documentation files (the \"Software\"), to deal",
"in all # copies or substantial portions of the Software.",
"ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN",
"# SOFTWARE. from pathlib import Path from typing import Optional",
"new_sources(self, sources=\"\", fail=True): if not Path(self.directory).is_dir(): raise Exception(\"Cannot access fedpkg",
"this has to be done in current env b/c we",
"BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,",
"THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from",
"shall be included in all # copies or substantial portions",
"to clone repositories \" \"from Fedora dist-git - this may",
"Software is # furnished to do so, subject to the",
"PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR",
"whom the Software is # furnished to do so, subject",
"to koji failed.\", fail=True, ) except PackitCommandFailedError as ex: #",
"= None, ): \"\"\" build in koji :param scratch: scratch",
"sublicense, and/or sell # copies of the Software, and to",
"Optional from packit.exceptions import PackitCommandFailedError from packit.utils import commands #",
"if self.fas_username: cmd += [\"--user\", self.fas_username] cmd += [\"-q\", \"clone\"]",
"+= [\"--user\", self.fas_username] cmd += [\"-q\", \"clone\"] if anonymous: cmd",
"that you are authorized to clone repositories \" \"from Fedora",
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY",
"or not? :param nowait: False == wait for the build",
"substantial portions of the Software. # # THE SOFTWARE IS",
"of the code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__(",
"as ex: # fail on the fedpkg side, the build",
"notice shall be included in all # copies or substantial",
"for build, not dist-git repo & ref :return: \"\"\" cmd",
"do so, subject to the following conditions: # # The",
"LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,",
"WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING",
"(temporary) build or not? :param nowait: False == wait for",
"in the Software without restriction, including without limitation the rights",
"crashed which is a known issue: \" \"the build is",
"clone a dist-git repo; this has to be done in",
"bool = False, nowait: bool = False, koji_target: Optional[str] =",
"# furnished to do so, subject to the following conditions:",
"\"please make sure that you are authorized to clone repositories",
"any person obtaining a copy # of this software and",
"ARISING FROM, # OUT OF OR IN CONNECTION WITH THE",
"SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,",
"KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO",
"known issue: \" \"the build is submitted in koji anyway.\"",
"OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES",
"restriction, including without limitation the rights # to use, copy,",
"from packit.utils.logging import logger class FedPKG: \"\"\" Part of the",
"access fedpkg repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory, error_message=\"Adding",
"class FedPKG: \"\"\" Part of the code is from release-bot:",
"got an unexpected keyword argument 'ki_handler'\" in ex.stderr_output ): logger.info(",
"in ex.stderr_output ): logger.info( \"The 'fedpkg build' command crashed which",
"False, nowait: bool = False, koji_target: Optional[str] = None, srpm_path:",
"including without limitation the rights # to use, copy, modify,",
"import PackitCommandFailedError from packit.utils import commands # so we can",
"copyright notice and this permission notice shall be included in",
"error_msg = ( f\"Packit failed to clone the repository {package_name};",
"ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED",
"free of charge, to any person obtaining a copy #",
"files (the \"Software\"), to deal # in the Software without",
"nowait: False == wait for the build to finish :param",
"Path(self.directory).is_dir(): raise Exception(\"Cannot access fedpkg repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\",",
"be done in current env b/c we don't have the",
"OR OTHER DEALINGS IN THE # SOFTWARE. from pathlib import",
"PackitCommandFailedError from packit.utils import commands # so we can mock",
"+= [\"--srpm\", str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission of build",
"MIT License # # Copyright (c) 2019 Red Hat, Inc.",
"argument 'ki_handler'\" in ex.stderr_output ): logger.info( \"The 'fedpkg build' command",
"if ( \"watch_tasks() got an unexpected keyword argument 'ki_handler'\" in",
"current env b/c we don't have the keytab in sandbox",
"return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory, error_message=\"Adding new sources failed:\",",
"IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,",
"from typing import Optional from packit.exceptions import PackitCommandFailedError from packit.utils",
"self, scratch: bool = False, nowait: bool = False, koji_target:",
"of the Software. # # THE SOFTWARE IS PROVIDED \"AS",
"sources failed:\", fail=fail, ) def build( self, scratch: bool =",
"anonymous: cmd += [\"-a\"] cmd += [package_name, target_path] error_msg =",
"target_path: str, anonymous: bool = False): \"\"\" clone a dist-git",
"[\"--user\", self.fas_username] cmd += [\"-q\", \"clone\"] if anonymous: cmd +=",
"koji failed.\", fail=True, ) except PackitCommandFailedError as ex: # fail",
"if nowait: cmd.append(\"--nowait\") if koji_target: cmd += [\"--target\", koji_target] if",
"cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory, error_message=\"Adding new sources failed:\", fail=fail, )",
"\"the build is submitted in koji anyway.\" ) logger.debug(ex.stdout_output) else:",
"\" \"the build is submitted in koji anyway.\" ) logger.debug(ex.stdout_output)",
"try: commands.run_command_remote( cmd=cmd, cwd=self.directory, error_message=\"Submission of build to koji failed.\",",
"== wait for the build to finish :param koji_target: koji",
"Red Hat, Inc. # Permission is hereby granted, free of",
"clone the repository {package_name}; \" \"please make sure that you",
"of this software and associated documentation files (the \"Software\"), to",
"OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR",
"OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE",
"build to finish :param koji_target: koji target to build in",
"EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE",
"[self.fedpkg_exec, \"build\"] if scratch: cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\") if koji_target:",
"to clone the repository {package_name}; \" \"please make sure that",
"side, the build is triggered if ( \"watch_tasks() got an",
"https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__( self, fas_username: str = None, directory:",
"cwd=self.directory, error_message=\"Adding new sources failed:\", fail=fail, ) def build( self,",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,",
"PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS",
"(the \"Software\"), to deal # in the Software without restriction,",
"= \"fedpkg-stage\" if stage else \"fedpkg\" def __repr__(self): return (",
"= None, directory: str = None, stage: bool = False",
"& ref :return: \"\"\" cmd = [self.fedpkg_exec, \"build\"] if scratch:",
"WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND",
"if koji_target: cmd += [\"--target\", koji_target] if srpm_path: cmd +=",
"import commands # so we can mock utils from packit.utils.logging",
"charge, to any person obtaining a copy # of this",
"permit persons to whom the Software is # furnished to",
"pathlib import Path from typing import Optional from packit.exceptions import",
"\" \"please make sure that you are authorized to clone",
"THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE",
"in sandbox \"\"\" cmd = [self.fedpkg_exec] if self.fas_username: cmd +=",
"the Software is # furnished to do so, subject to",
"above copyright notice and this permission notice shall be included",
"IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,",
"not dist-git repo & ref :return: \"\"\" cmd = [self.fedpkg_exec,",
"A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE",
"limitation the rights # to use, copy, modify, merge, publish,",
"in koji anyway.\" ) logger.debug(ex.stdout_output) else: raise def clone(self, package_name:",
"PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #",
"without limitation the rights # to use, copy, modify, merge,",
"anyway.\" ) logger.debug(ex.stdout_output) else: raise def clone(self, package_name: str, target_path:",
"\"\"\" Part of the code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\"",
"the build is triggered if ( \"watch_tasks() got an unexpected",
"self.fas_username: cmd += [\"--user\", self.fas_username] cmd += [\"-q\", \"clone\"] if",
"# copies or substantial portions of the Software. # #",
"EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE",
"koji :param scratch: scratch (temporary) build or not? :param nowait:",
"\"\"\" def __init__( self, fas_username: str = None, directory: str",
"# in the Software without restriction, including without limitation the",
"= False, nowait: bool = False, koji_target: Optional[str] = None,",
"documentation files (the \"Software\"), to deal # in the Software",
"False == wait for the build to finish :param koji_target:",
"can mock utils from packit.utils.logging import logger class FedPKG: \"\"\"",
"raise def clone(self, package_name: str, target_path: str, anonymous: bool =",
"copies or substantial portions of the Software. # # THE",
"koji_target: Optional[str] = None, srpm_path: Optional[Path] = None, ): \"\"\"",
"- this may require SSH keys set up or \"",
"Optional[Path] = None, ): \"\"\" build in koji :param scratch:",
"\"build\"] if scratch: cmd.append(\"--scratch\") if nowait: cmd.append(\"--nowait\") if koji_target: cmd",
"= directory self.stage = stage self.fedpkg_exec = \"fedpkg-stage\" if stage",
"self, fas_username: str = None, directory: str = None, stage:",
"if not Path(self.directory).is_dir(): raise Exception(\"Cannot access fedpkg repository:\") return commands.run_command_remote(",
"OTHER DEALINGS IN THE # SOFTWARE. from pathlib import Path",
"env b/c we don't have the keytab in sandbox \"\"\"",
"sources], cwd=self.directory, error_message=\"Adding new sources failed:\", fail=fail, ) def build(",
"cmd += [\"-q\", \"clone\"] if anonymous: cmd += [\"-a\"] cmd",
"ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT",
"SSH keys set up or \" \"Kerberos ticket being active.\"",
"sell # copies of the Software, and to permit persons",
"OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT",
"else: raise def clone(self, package_name: str, target_path: str, anonymous: bool",
"b/c we don't have the keytab in sandbox \"\"\" cmd",
"the repository {package_name}; \" \"please make sure that you are",
"code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__( self, fas_username:",
"\" \"from Fedora dist-git - this may require SSH keys",
"OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,",
"publish, distribute, sublicense, and/or sell # copies of the Software,",
"def new_sources(self, sources=\"\", fail=True): if not Path(self.directory).is_dir(): raise Exception(\"Cannot access",
"to the following conditions: # # The above copyright notice",
"and this permission notice shall be included in all #",
"finish :param koji_target: koji target to build in (`koji list-targets`)",
"\"\"\" cmd = [self.fedpkg_exec] if self.fas_username: cmd += [\"--user\", self.fas_username]",
"in (`koji list-targets`) :param srpm_path: use selected SRPM for build,",
"ref :return: \"\"\" cmd = [self.fedpkg_exec, \"build\"] if scratch: cmd.append(\"--scratch\")",
"from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def __init__( self, fas_username: str =",
"modify, merge, publish, distribute, sublicense, and/or sell # copies of",
"def clone(self, package_name: str, target_path: str, anonymous: bool = False):",
"a known issue: \" \"the build is submitted in koji",
"OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION",
"IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,",
"fas_username: str = None, directory: str = None, stage: bool",
"Software, and to permit persons to whom the Software is",
"# MIT License # # Copyright (c) 2019 Red Hat,",
"# to use, copy, modify, merge, publish, distribute, sublicense, and/or",
":param srpm_path: use selected SRPM for build, not dist-git repo",
"from packit.exceptions import PackitCommandFailedError from packit.utils import commands # so",
"OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT",
"repository:\") return commands.run_command_remote( cmd=[self.fedpkg_exec, \"new-sources\", sources], cwd=self.directory, error_message=\"Adding new sources",
"= ( f\"Packit failed to clone the repository {package_name}; \"",
"dist-git repo & ref :return: \"\"\" cmd = [self.fedpkg_exec, \"build\"]",
"\"Software\"), to deal # in the Software without restriction, including",
"ex: # fail on the fedpkg side, the build is",
"if stage else \"fedpkg\" def __repr__(self): return ( \"FedPKG(\" f\"fas_username='{self.fas_username}',",
"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR",
"require SSH keys set up or \" \"Kerberos ticket being",
"an unexpected keyword argument 'ki_handler'\" in ex.stderr_output ): logger.info( \"The",
"new sources failed:\", fail=fail, ) def build( self, scratch: bool",
"= None, srpm_path: Optional[Path] = None, ): \"\"\" build in",
"COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER",
"to be done in current env b/c we don't have",
"): \"\"\" build in koji :param scratch: scratch (temporary) build",
"( f\"Packit failed to clone the repository {package_name}; \" \"please",
"): self.fas_username = fas_username self.directory = directory self.stage = stage",
"to finish :param koji_target: koji target to build in (`koji",
"# copies of the Software, and to permit persons to",
"cmd += [\"--user\", self.fas_username] cmd += [\"-q\", \"clone\"] if anonymous:",
"logger.debug(ex.stdout_output) else: raise def clone(self, package_name: str, target_path: str, anonymous:",
"granted, free of charge, to any person obtaining a copy",
"obtaining a copy # of this software and associated documentation",
"we don't have the keytab in sandbox \"\"\" cmd =",
"keyword argument 'ki_handler'\" in ex.stderr_output ): logger.info( \"The 'fedpkg build'",
"= None, stage: bool = False ): self.fas_username = fas_username",
"TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN",
"is # furnished to do so, subject to the following",
"to whom the Software is # furnished to do so,",
"build in (`koji list-targets`) :param srpm_path: use selected SRPM for",
"copy # of this software and associated documentation files (the",
"done in current env b/c we don't have the keytab",
"= stage self.fedpkg_exec = \"fedpkg-stage\" if stage else \"fedpkg\" def",
"THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY",
"OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.",
"Permission is hereby granted, free of charge, to any person",
"'ki_handler'\" in ex.stderr_output ): logger.info( \"The 'fedpkg build' command crashed",
"you are authorized to clone repositories \" \"from Fedora dist-git",
"cmd += [\"-a\"] cmd += [package_name, target_path] error_msg = (",
"Fedora dist-git - this may require SSH keys set up",
"The above copyright notice and this permission notice shall be",
"THE # SOFTWARE. from pathlib import Path from typing import",
"issue: \" \"the build is submitted in koji anyway.\" )",
"[\"-a\"] cmd += [package_name, target_path] error_msg = ( f\"Packit failed",
"if srpm_path: cmd += [\"--srpm\", str(srpm_path)] try: commands.run_command_remote( cmd=cmd, cwd=self.directory,",
"\"The 'fedpkg build' command crashed which is a known issue:",
"target_path] error_msg = ( f\"Packit failed to clone the repository",
"WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT",
"= fas_username self.directory = directory self.stage = stage self.fedpkg_exec =",
"repository {package_name}; \" \"please make sure that you are authorized",
"is triggered if ( \"watch_tasks() got an unexpected keyword argument",
"AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES",
"Part of the code is from release-bot: https://github.com/user-cont/release-bot/blob/master/release_bot/fedora.py \"\"\" def",
"utils from packit.utils.logging import logger class FedPKG: \"\"\" Part of",
"to permit persons to whom the Software is # furnished",
"(c) 2019 Red Hat, Inc. # Permission is hereby granted,",
"WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING"
] |
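The list above is made of overlapping fragments of a fedpkg wrapper class (packit's FedPKG helper). For readers who want to see the command-building logic those fragments describe in one place, here is a minimal sketch assembled only from the fragments above; it is not the verbatim upstream file, the class name FedPKGSketch is invented for illustration, and subprocess.run stands in for packit's commands.run_command_remote, which is not reproduced here.

import subprocess
from pathlib import Path
from typing import Optional


class FedPKGSketch:
    def __init__(self, fas_username: str = None, directory: str = None, stage: bool = False):
        self.fas_username = fas_username
        self.directory = directory
        self.stage = stage
        # stage builds go through the staging executable, as in the fragments
        self.fedpkg_exec = "fedpkg-stage" if stage else "fedpkg"

    def build(self, scratch: bool = False, nowait: bool = False,
              koji_target: Optional[str] = None, srpm_path: Optional[Path] = None):
        # Build in koji; the flag handling mirrors the fragments above.
        cmd = [self.fedpkg_exec, "build"]
        if scratch:
            cmd.append("--scratch")
        if nowait:
            cmd.append("--nowait")
        if koji_target:
            cmd += ["--target", koji_target]
        if srpm_path:
            cmd += ["--srpm", str(srpm_path)]
        # Stand-in for commands.run_command_remote(cmd=cmd, cwd=self.directory, ...)
        subprocess.run(cmd, cwd=self.directory, check=True)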
[
"filled() method #a = MaskedArray(np.arange(10), np.arange(10)%3) #b = MaskedArray(np.arange(10.) +",
"this # strategy does not give a \"filled\" method. Probably",
"# strategy does not give a \"filled\" method. Probably to",
"as np # Tests for Masked ArrayCollections. # # First",
"#c = ArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] += 100",
"Masked ArrayCollections. # # First try: Simply make an arraycollection",
"method. Probably to get a masked # ArrayCollection we should",
"= MaskedArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c))",
"= MaskedArray(np.arange(10.) + 13, np.arange(10)%2) #c = ArrayCollection([('age', a), ('weight',",
"get a masked # ArrayCollection we should really subclass ArrayCollection",
"np.arange(10)%3) #b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2) #c = ArrayCollection([('age',",
"a # fill_value and a filled() method #a = MaskedArray(np.arange(10),",
"an arraycollection of MaskedArrays. Downside: this # strategy does not",
"import ArrayCollection from ndarray_ducktypes.MaskedArray import MaskedArray from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection",
"ndarray_ducktypes.ArrayCollection import ArrayCollection from ndarray_ducktypes.MaskedArray import MaskedArray from ndarray_ducktypes.MaskedArrayCollection import",
"#c['age'] += 100 #print(repr(c)) ## second try: Subclass of ArrayCollection",
"ArrayCollections. # # First try: Simply make an arraycollection of",
"#!/usr/bin/env python from ndarray_ducktypes.ArrayCollection import ArrayCollection from ndarray_ducktypes.MaskedArray import MaskedArray",
"ArrayCollection #c = MaskedArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] +=",
"#a = MaskedArray(np.arange(10), np.arange(10)%3) #b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2)",
"('weight', b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c)) ## second try:",
"ArrayCollection we should really subclass ArrayCollection to have a #",
"ArrayCollection from ndarray_ducktypes.MaskedArray import MaskedArray from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import",
"strategy does not give a \"filled\" method. Probably to get",
"#print(repr(c)) #c['age'] += 100 #print(repr(c)) ## second try: Subclass of",
"= ArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c))",
"make an arraycollection of MaskedArrays. Downside: this # strategy does",
"MaskedArray(np.arange(10.) + 13, np.arange(10)%2) #c = ArrayCollection([('age', a), ('weight', b)])",
"ArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c)) ##",
"MaskedArray(np.arange(10), np.arange(10)%3) #b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2) #c =",
"np # Tests for Masked ArrayCollections. # # First try:",
"really subclass ArrayCollection to have a # fill_value and a",
"fill_value and a filled() method #a = MaskedArray(np.arange(10), np.arange(10)%3) #b",
"try: Subclass of ArrayCollection #c = MaskedArrayCollection([('age', a), ('weight', b)])",
"First try: Simply make an arraycollection of MaskedArrays. Downside: this",
"should really subclass ArrayCollection to have a # fill_value and",
"try: Simply make an arraycollection of MaskedArrays. Downside: this #",
"Probably to get a masked # ArrayCollection we should really",
"from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import numpy as np # Tests",
"ndarray_ducktypes.MaskedArray import MaskedArray from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import numpy as",
"# ArrayCollection we should really subclass ArrayCollection to have a",
"Subclass of ArrayCollection #c = MaskedArrayCollection([('age', a), ('weight', b)]) #print(repr(c))",
"ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import numpy as np # Tests for",
"does not give a \"filled\" method. Probably to get a",
"Tests for Masked ArrayCollections. # # First try: Simply make",
"to have a # fill_value and a filled() method #a",
"= MaskedArray(np.arange(10), np.arange(10)%3) #b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2) #c",
"to get a masked # ArrayCollection we should really subclass",
"# # First try: Simply make an arraycollection of MaskedArrays.",
"## second try: Subclass of ArrayCollection #c = MaskedArrayCollection([('age', a),",
"numpy as np # Tests for Masked ArrayCollections. # #",
"+ 13, np.arange(10)%2) #c = ArrayCollection([('age', a), ('weight', b)]) #print(repr(c))",
"arraycollection of MaskedArrays. Downside: this # strategy does not give",
"# fill_value and a filled() method #a = MaskedArray(np.arange(10), np.arange(10)%3)",
"np.arange(10)%2) #c = ArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] +=",
"python from ndarray_ducktypes.ArrayCollection import ArrayCollection from ndarray_ducktypes.MaskedArray import MaskedArray from",
"MaskedArrays. Downside: this # strategy does not give a \"filled\"",
"# First try: Simply make an arraycollection of MaskedArrays. Downside:",
"a \"filled\" method. Probably to get a masked # ArrayCollection",
"b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c)) ## second try: Subclass",
"we should really subclass ArrayCollection to have a # fill_value",
"#b = MaskedArray(np.arange(10.) + 13, np.arange(10)%2) #c = ArrayCollection([('age', a),",
"ArrayCollection to have a # fill_value and a filled() method",
"Simply make an arraycollection of MaskedArrays. Downside: this # strategy",
"a masked # ArrayCollection we should really subclass ArrayCollection to",
"give a \"filled\" method. Probably to get a masked #",
"of MaskedArrays. Downside: this # strategy does not give a",
"not give a \"filled\" method. Probably to get a masked",
"a filled() method #a = MaskedArray(np.arange(10), np.arange(10)%3) #b = MaskedArray(np.arange(10.)",
"and a filled() method #a = MaskedArray(np.arange(10), np.arange(10)%3) #b =",
"# Tests for Masked ArrayCollections. # # First try: Simply",
"+= 100 #print(repr(c)) ## second try: Subclass of ArrayCollection #c",
"for Masked ArrayCollections. # # First try: Simply make an",
"100 #print(repr(c)) ## second try: Subclass of ArrayCollection #c =",
"MaskedArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c)) #print(repr(c.filled()))",
"import numpy as np # Tests for Masked ArrayCollections. #",
"13, np.arange(10)%2) #c = ArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age']",
"\"filled\" method. Probably to get a masked # ArrayCollection we",
"Downside: this # strategy does not give a \"filled\" method.",
"from ndarray_ducktypes.ArrayCollection import ArrayCollection from ndarray_ducktypes.MaskedArray import MaskedArray from ndarray_ducktypes.MaskedArrayCollection",
"of ArrayCollection #c = MaskedArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age']",
"import MaskedArray from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import numpy as np",
"#print(repr(c)) ## second try: Subclass of ArrayCollection #c = MaskedArrayCollection([('age',",
"MaskedArrayCollection import numpy as np # Tests for Masked ArrayCollections.",
"MaskedArray from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import numpy as np #",
"second try: Subclass of ArrayCollection #c = MaskedArrayCollection([('age', a), ('weight',",
"a), ('weight', b)]) #print(repr(c)) #c['age'] += 100 #print(repr(c)) ## second",
"from ndarray_ducktypes.MaskedArray import MaskedArray from ndarray_ducktypes.MaskedArrayCollection import MaskedArrayCollection import numpy",
"subclass ArrayCollection to have a # fill_value and a filled()",
"#c = MaskedArrayCollection([('age', a), ('weight', b)]) #print(repr(c)) #c['age'] += 100",
"have a # fill_value and a filled() method #a =",
"import MaskedArrayCollection import numpy as np # Tests for Masked",
"method #a = MaskedArray(np.arange(10), np.arange(10)%3) #b = MaskedArray(np.arange(10.) + 13,",
"masked # ArrayCollection we should really subclass ArrayCollection to have"
] |
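The fragments above come from a commented-out experiment: building an ArrayCollection out of MaskedArrays (the "first try" the comments describe). As a usage illustration only, here is that experiment written out straight, assuming the ndarray_ducktypes package referenced in the fragments is installed.

import numpy as np
from ndarray_ducktypes.ArrayCollection import ArrayCollection
from ndarray_ducktypes.MaskedArray import MaskedArray

a = MaskedArray(np.arange(10), np.arange(10) % 3)        # masked integer column
b = MaskedArray(np.arange(10.) + 13, np.arange(10) % 2)  # masked float column
c = ArrayCollection([('age', a), ('weight', b)])
c['age'] += 100   # in-place update of one named column
print(repr(c))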
[
"StructuredNode, StringProperty, JSONProperty, \\ Relationship, IntegerProperty import numpy as np",
"str(self.label): return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" else: return",
"return f\"\"\" <h1>Фрагмент: {self.order_id} </h1> <table border=\"1\" width=100%> <caption> Информация",
"<tr> <th>Количество символов</th> <td>{self.character_num()}</td> </tr> <tr> <th>Количество слов</th> <td>{self.words_num()}</td> </tr>",
"1 if str(self.order_id) != str(self.label): return f\"{str(self.order_id).zfill(leading)}: \" \\ +",
"<gh_stars>0 from neomodel import StructuredNode, StringProperty, JSONProperty, \\ Relationship, IntegerProperty",
"['TextNode'] class TextNode(StructuredNode): order_id = IntegerProperty(required=True, unique_index=True) label = StringProperty(required=True)",
"JSONProperty() link = Relationship('TextNode', 'ALG', model=TextRelation) def short(self): res =",
"label = StringProperty(required=True) text = StringProperty(required=True) alg_results = JSONProperty() link",
"str(self.order_id) != str(self.label): return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\"",
"TextRelation __all__ = ['TextNode'] class TextNode(StructuredNode): order_id = IntegerProperty(required=True, unique_index=True)",
"</h1> <table border=\"1\" width=100%> <caption> Информация о вершине </caption> <tr>",
"import re from models.text_relation import TextRelation __all__ = ['TextNode'] class",
"вершине </caption> <tr> <th>Количество символов</th> <td>{self.character_num()}</td> </tr> <tr> <th>Количество слов</th>",
"\"\"\" def preview(self, frag_num=0): leading = 3 if frag_num >",
"</tr> <tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr> <tr> <th>Количество связей</th> <td>{len(self.link)}</td>",
"<td>{len(self.link)}</td> </tr> </table> \"\"\" def preview(self, frag_num=0): leading = 3",
"0: leading = int(np.floor(np.log10(frag_num))) + 1 if str(self.order_id) != str(self.label):",
"sentences_num(self): return len([s for s in self.text.split('.') if len(s) >",
"\\ Relationship, IntegerProperty import numpy as np import re from",
"'ALG', model=TextRelation) def short(self): res = ''.join([word.strip() + ' '",
"<th>Количество связей</th> <td>{len(self.link)}</td> </tr> </table> \"\"\" def preview(self, frag_num=0): leading",
"IntegerProperty import numpy as np import re from models.text_relation import",
"> 0: leading = int(np.floor(np.log10(frag_num))) + 1 if str(self.order_id) !=",
"import StructuredNode, StringProperty, JSONProperty, \\ Relationship, IntegerProperty import numpy as",
"f\"[{self.label}] {self.short()}...\" return f\"[{self.label}] {self.short()}...\" def words_num(self): return len(self.text.split()) def",
"unique_index=True) label = StringProperty(required=True) text = StringProperty(required=True) alg_results = JSONProperty()",
"frag_num > 0: leading = int(np.floor(np.log10(frag_num))) + 1 if str(self.order_id)",
"frag_num=0): leading = 3 if frag_num > 0: leading =",
"self.text, 5)[:5]]) return res def describe(self): return f\"\"\" <h1>Фрагмент: {self.order_id}",
"return len(self.text) def sentences_num(self): return len([s for s in self.text.split('.')",
"{self.order_id} </h1> <table border=\"1\" width=100%> <caption> Информация о вершине </caption>",
"return len(self.text.split()) def character_num(self): return len(self.text) def sentences_num(self): return len([s",
"alg_results = JSONProperty() link = Relationship('TextNode', 'ALG', model=TextRelation) def short(self):",
"<tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr> <tr> <th>Количество связей</th> <td>{len(self.link)}</td> </tr>",
"def sentences_num(self): return len([s for s in self.text.split('.') if len(s)",
"+ f\"[{self.label}] {self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}]",
"= Relationship('TextNode', 'ALG', model=TextRelation) def short(self): res = ''.join([word.strip() +",
"len(self.text.split()) def character_num(self): return len(self.text) def sentences_num(self): return len([s for",
"{self.short()}...\" return f\"[{self.label}] {self.short()}...\" def words_num(self): return len(self.text.split()) def character_num(self):",
"border=\"1\" width=100%> <caption> Информация о вершине </caption> <tr> <th>Количество символов</th>",
"text = StringProperty(required=True) alg_results = JSONProperty() link = Relationship('TextNode', 'ALG',",
"StringProperty(required=True) alg_results = JSONProperty() link = Relationship('TextNode', 'ALG', model=TextRelation) def",
"{self.short()}...\" def words_num(self): return len(self.text.split()) def character_num(self): return len(self.text) def",
"= ['TextNode'] class TextNode(StructuredNode): order_id = IntegerProperty(required=True, unique_index=True) label =",
"<tr> <th>Количество связей</th> <td>{len(self.link)}</td> </tr> </table> \"\"\" def preview(self, frag_num=0):",
"</tr> <tr> <th>Количество слов</th> <td>{self.words_num()}</td> </tr> <tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td>",
"Relationship('TextNode', 'ALG', model=TextRelation) def short(self): res = ''.join([word.strip() + '",
"__all__ = ['TextNode'] class TextNode(StructuredNode): order_id = IntegerProperty(required=True, unique_index=True) label",
"''.join([word.strip() + ' ' for word in re.split(r'[\\n ]', self.text,",
"int(np.floor(np.log10(frag_num))) + 1 if str(self.order_id) != str(self.label): return f\"{str(self.order_id).zfill(leading)}: \"",
"preview(self, frag_num=0): leading = 3 if frag_num > 0: leading",
"import TextRelation __all__ = ['TextNode'] class TextNode(StructuredNode): order_id = IntegerProperty(required=True,",
"связей</th> <td>{len(self.link)}</td> </tr> </table> \"\"\" def preview(self, frag_num=0): leading =",
"</caption> <tr> <th>Количество символов</th> <td>{self.character_num()}</td> </tr> <tr> <th>Количество слов</th> <td>{self.words_num()}</td>",
"\\ + f\"[{self.label}] {self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}: \" \\ +",
"StringProperty, JSONProperty, \\ Relationship, IntegerProperty import numpy as np import",
"<table border=\"1\" width=100%> <caption> Информация о вершине </caption> <tr> <th>Количество",
"return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}:",
"import numpy as np import re from models.text_relation import TextRelation",
"<th>Количество символов</th> <td>{self.character_num()}</td> </tr> <tr> <th>Количество слов</th> <td>{self.words_num()}</td> </tr> <tr>",
"else: return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" return f\"[{self.label}]",
"def character_num(self): return len(self.text) def sentences_num(self): return len([s for s",
"+ 1 if str(self.order_id) != str(self.label): return f\"{str(self.order_id).zfill(leading)}: \" \\",
"= StringProperty(required=True) text = StringProperty(required=True) alg_results = JSONProperty() link =",
"= ''.join([word.strip() + ' ' for word in re.split(r'[\\n ]',",
"слов</th> <td>{self.words_num()}</td> </tr> <tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr> <tr> <th>Количество",
"StringProperty(required=True) text = StringProperty(required=True) alg_results = JSONProperty() link = Relationship('TextNode',",
"words_num(self): return len(self.text.split()) def character_num(self): return len(self.text) def sentences_num(self): return",
"+ ' ' for word in re.split(r'[\\n ]', self.text, 5)[:5]])",
"<caption> Информация о вершине </caption> <tr> <th>Количество символов</th> <td>{self.character_num()}</td> </tr>",
"+ f\"[{self.label}] {self.short()}...\" return f\"[{self.label}] {self.short()}...\" def words_num(self): return len(self.text.split())",
"if frag_num > 0: leading = int(np.floor(np.log10(frag_num))) + 1 if",
"return len([s for s in self.text.split('.') if len(s) > 2])",
"character_num(self): return len(self.text) def sentences_num(self): return len([s for s in",
"def describe(self): return f\"\"\" <h1>Фрагмент: {self.order_id} </h1> <table border=\"1\" width=100%>",
"numpy as np import re from models.text_relation import TextRelation __all__",
"for word in re.split(r'[\\n ]', self.text, 5)[:5]]) return res def",
"</tr> <tr> <th>Количество связей</th> <td>{len(self.link)}</td> </tr> </table> \"\"\" def preview(self,",
"<td>{self.character_num()}</td> </tr> <tr> <th>Количество слов</th> <td>{self.words_num()}</td> </tr> <tr> <th>Количество предложений</th>",
"from neomodel import StructuredNode, StringProperty, JSONProperty, \\ Relationship, IntegerProperty import",
"in re.split(r'[\\n ]', self.text, 5)[:5]]) return res def describe(self): return",
"f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" return f\"[{self.label}] {self.short()}...\" def",
"short(self): res = ''.join([word.strip() + ' ' for word in",
"<td>{self.sentences_num()}</td> </tr> <tr> <th>Количество связей</th> <td>{len(self.link)}</td> </tr> </table> \"\"\" def",
"model=TextRelation) def short(self): res = ''.join([word.strip() + ' ' for",
"f\"\"\" <h1>Фрагмент: {self.order_id} </h1> <table border=\"1\" width=100%> <caption> Информация о",
"leading = int(np.floor(np.log10(frag_num))) + 1 if str(self.order_id) != str(self.label): return",
"re from models.text_relation import TextRelation __all__ = ['TextNode'] class TextNode(StructuredNode):",
"\" \\ + f\"[{self.label}] {self.short()}...\" return f\"[{self.label}] {self.short()}...\" def words_num(self):",
"link = Relationship('TextNode', 'ALG', model=TextRelation) def short(self): res = ''.join([word.strip()",
"<td>{self.words_num()}</td> </tr> <tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr> <tr> <th>Количество связей</th>",
"символов</th> <td>{self.character_num()}</td> </tr> <tr> <th>Количество слов</th> <td>{self.words_num()}</td> </tr> <tr> <th>Количество",
"def preview(self, frag_num=0): leading = 3 if frag_num > 0:",
"!= str(self.label): return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" else:",
"<h1>Фрагмент: {self.order_id} </h1> <table border=\"1\" width=100%> <caption> Информация о вершине",
"np import re from models.text_relation import TextRelation __all__ = ['TextNode']",
"= IntegerProperty(required=True, unique_index=True) label = StringProperty(required=True) text = StringProperty(required=True) alg_results",
"def words_num(self): return len(self.text.split()) def character_num(self): return len(self.text) def sentences_num(self):",
"IntegerProperty(required=True, unique_index=True) label = StringProperty(required=True) text = StringProperty(required=True) alg_results =",
"if str(self.order_id) != str(self.label): return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}]",
"= JSONProperty() link = Relationship('TextNode', 'ALG', model=TextRelation) def short(self): res",
"def short(self): res = ''.join([word.strip() + ' ' for word",
"]', self.text, 5)[:5]]) return res def describe(self): return f\"\"\" <h1>Фрагмент:",
"JSONProperty, \\ Relationship, IntegerProperty import numpy as np import re",
"order_id = IntegerProperty(required=True, unique_index=True) label = StringProperty(required=True) text = StringProperty(required=True)",
"word in re.split(r'[\\n ]', self.text, 5)[:5]]) return res def describe(self):",
"re.split(r'[\\n ]', self.text, 5)[:5]]) return res def describe(self): return f\"\"\"",
"= 3 if frag_num > 0: leading = int(np.floor(np.log10(frag_num))) +",
"= int(np.floor(np.log10(frag_num))) + 1 if str(self.order_id) != str(self.label): return f\"{str(self.order_id).zfill(leading)}:",
"<th>Количество слов</th> <td>{self.words_num()}</td> </tr> <tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr> <tr>",
"res = ''.join([word.strip() + ' ' for word in re.split(r'[\\n",
"</tr> </table> \"\"\" def preview(self, frag_num=0): leading = 3 if",
"\" \\ + f\"[{self.label}] {self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}: \" \\",
"f\"[{self.label}] {self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\"",
"as np import re from models.text_relation import TextRelation __all__ =",
"class TextNode(StructuredNode): order_id = IntegerProperty(required=True, unique_index=True) label = StringProperty(required=True) text",
"= StringProperty(required=True) alg_results = JSONProperty() link = Relationship('TextNode', 'ALG', model=TextRelation)",
"f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}: \"",
"Relationship, IntegerProperty import numpy as np import re from models.text_relation",
"Информация о вершине </caption> <tr> <th>Количество символов</th> <td>{self.character_num()}</td> </tr> <tr>",
"from models.text_relation import TextRelation __all__ = ['TextNode'] class TextNode(StructuredNode): order_id",
"{self.short()}...\" else: return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" return",
"models.text_relation import TextRelation __all__ = ['TextNode'] class TextNode(StructuredNode): order_id =",
"res def describe(self): return f\"\"\" <h1>Фрагмент: {self.order_id} </h1> <table border=\"1\"",
"leading = 3 if frag_num > 0: leading = int(np.floor(np.log10(frag_num)))",
"' ' for word in re.split(r'[\\n ]', self.text, 5)[:5]]) return",
"</table> \"\"\" def preview(self, frag_num=0): leading = 3 if frag_num",
"\\ + f\"[{self.label}] {self.short()}...\" return f\"[{self.label}] {self.short()}...\" def words_num(self): return",
"return f\"[{self.label}] {self.short()}...\" def words_num(self): return len(self.text.split()) def character_num(self): return",
"о вершине </caption> <tr> <th>Количество символов</th> <td>{self.character_num()}</td> </tr> <tr> <th>Количество",
"3 if frag_num > 0: leading = int(np.floor(np.log10(frag_num))) + 1",
"return res def describe(self): return f\"\"\" <h1>Фрагмент: {self.order_id} </h1> <table",
"width=100%> <caption> Информация о вершине </caption> <tr> <th>Количество символов</th> <td>{self.character_num()}</td>",
"neomodel import StructuredNode, StringProperty, JSONProperty, \\ Relationship, IntegerProperty import numpy",
"f\"[{self.label}] {self.short()}...\" def words_num(self): return len(self.text.split()) def character_num(self): return len(self.text)",
"TextNode(StructuredNode): order_id = IntegerProperty(required=True, unique_index=True) label = StringProperty(required=True) text =",
"' for word in re.split(r'[\\n ]', self.text, 5)[:5]]) return res",
"<th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr> <tr> <th>Количество связей</th> <td>{len(self.link)}</td> </tr> </table>",
"предложений</th> <td>{self.sentences_num()}</td> </tr> <tr> <th>Количество связей</th> <td>{len(self.link)}</td> </tr> </table> \"\"\"",
"describe(self): return f\"\"\" <h1>Фрагмент: {self.order_id} </h1> <table border=\"1\" width=100%> <caption>",
"return f\"{str(self.order_id).zfill(leading)}: \" \\ + f\"[{self.label}] {self.short()}...\" return f\"[{self.label}] {self.short()}...\"",
"5)[:5]]) return res def describe(self): return f\"\"\" <h1>Фрагмент: {self.order_id} </h1>",
"<tr> <th>Количество слов</th> <td>{self.words_num()}</td> </tr> <tr> <th>Количество предложений</th> <td>{self.sentences_num()}</td> </tr>",
"len(self.text) def sentences_num(self): return len([s for s in self.text.split('.') if"
] |
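The fragments above describe a neomodel node (TextNode) with an order_id, label, text, JSON results and a self-relationship, plus small counting helpers. The sketch below reassembles only the declarations and counters visible in the fragments; the class name TextNodeSketch is invented for illustration, and the TextRelation model from models.text_relation is omitted so the sketch stays self-contained.

from neomodel import (StructuredNode, StringProperty, JSONProperty,
                      Relationship, IntegerProperty)


class TextNodeSketch(StructuredNode):
    order_id = IntegerProperty(required=True, unique_index=True)
    label = StringProperty(required=True)
    text = StringProperty(required=True)
    alg_results = JSONProperty()
    # The fragments pass model=TextRelation here; dropped to avoid an extra import.
    link = Relationship('TextNodeSketch', 'ALG')

    def words_num(self):
        return len(self.text.split())

    def character_num(self):
        return len(self.text)

    def sentences_num(self):
        # Count '.'-separated chunks longer than two characters, as in the fragments.
        return len([s for s in self.text.split('.') if len(s) > 2])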
[
"return Bishop(board, team, position) def compare_list(self, expected, results): compared =",
"WHITE, C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate()",
"C('h7')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected",
"r in results: if e[0] == r[0] and e[1] ==",
"= self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('d3'),",
"board, team, position): from chess.models import Bishop return Bishop(board, team,",
"= [C('f5'), C('g6'), C('h7')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def",
"= StubBoard() board[C('c2')] = StubPiece(board, BLACK, C('c2')) bishop = self.get_bishop(board,",
"self.assertEqual(len(results), 13) board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6'))",
"expected = [C('b1')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self):",
"def get_bishop(self, board, team, position): from chess.models import Bishop return",
"TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team, position): from chess.models import Bishop",
"correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self): board = StubBoard()",
"WHITE, C('e4')) results = bishop.generate() expected = [C('d3'), C('c2')] correct",
"Bishop return Bishop(board, team, position) def compare_list(self, expected, results): compared",
"r[1]: compared.append(True) break else: compared.append(False) return compared def test_generate_topright(self): board",
"= [C('f3'), C('g2'), C('h1')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def",
"self.assertTrue(all(correct)) expected = [C('c6')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def",
"self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('d5')] correct",
"compared def test_generate_topright(self): board = StubBoard() board[C('h7')] = StubPiece(board, BLACK,",
"bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 10)",
"= StubPiece(board, BLACK, C('c2')) bishop = self.get_bishop(board, WHITE, C('e4')) results",
"for e in expected: for r in results: if e[0]",
"C('e4')) results = bishop.generate() self.assertEqual(len(results), 10) if __name__ == '__main__':",
"results = bishop.generate() self.assertEqual(len(results), 13) board = StubBoard() board[C('c6')] =",
"expected = [C('d3'), C('c2')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected",
"expected = [C('d5')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected =",
"C('e4')) results = bishop.generate() expected = [C('d5')] correct = self.compare_list(expected,",
"compared.append(True) break else: compared.append(False) return compared def test_generate_topright(self): board =",
"expected = [C('f5'), C('g6'), C('h7')] correct = self.compare_list(expected, results) self.assertTrue(all(correct))",
"Bishop(board, team, position) def compare_list(self, expected, results): compared = []",
"13) board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop",
"self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self): board = StubBoard() bishop =",
"compared = [] for e in expected: for r in",
"bishop.generate() expected = [C('f3'), C('g2'), C('h1')] correct = self.compare_list(expected, results)",
"= bishop.generate() expected = [C('d5')] correct = self.compare_list(expected, results) self.assertTrue(all(correct))",
"= self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 13) board",
"import Bishop return Bishop(board, team, position) def compare_list(self, expected, results):",
"StubPiece, C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team,",
"StubPiece(board, BLACK, C('c2')) bishop = self.get_bishop(board, WHITE, C('e4')) results =",
"C('h1')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self): board =",
"= bishop.generate() self.assertEqual(len(results), 13) board = StubBoard() board[C('c6')] = StubPiece(board,",
"C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team, position):",
"r[0] and e[1] == r[1]: compared.append(True) break else: compared.append(False) return",
"StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results),",
"def test_generate_topleft(self): board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6'))",
"C('e4')) results = bishop.generate() expected = [C('d3'), C('c2')] correct =",
"correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('c6')] correct =",
"expected = [C('f3'), C('g2'), C('h1')] correct = self.compare_list(expected, results) self.assertTrue(all(correct))",
"= [C('d5')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('c6')]",
"test_generate_topright(self): board = StubBoard() board[C('h7')] = StubPiece(board, BLACK, C('h7')) bishop",
"self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('c6')] correct = self.compare_list(expected, results)",
"bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected =",
"self.assertTrue(all(correct)) def test_generate_topleft(self): board = StubBoard() board[C('c6')] = StubPiece(board, WHITE,",
"= [C('d3'), C('c2')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected =",
"results = bishop.generate() self.assertEqual(len(results), 10) if __name__ == '__main__': unittest.main()",
"= self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 10) if",
"= [C('b1')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board",
"results = bishop.generate() expected = [C('f5'), C('g6'), C('h7')] correct =",
"def test_generate_amount(self): board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4'))",
"= bishop.generate() expected = [C('f3'), C('g2'), C('h1')] correct = self.compare_list(expected,",
"[C('b1')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board =",
"WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 10) if __name__ ==",
"results) self.assertTrue(all(correct)) def test_generate_amount(self): board = StubBoard() bishop = self.get_bishop(board,",
"WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team, position): from",
"StubBoard() board[C('c2')] = StubPiece(board, BLACK, C('c2')) bishop = self.get_bishop(board, WHITE,",
"bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 13)",
"= StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results",
"[C('d5')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('c6')] correct",
"BLACK, C('h7')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate()",
"= self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('f3'),",
"StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected",
"= self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board = StubBoard() board[C('c2')]",
"board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop =",
"import StubBoard, StubPiece, C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self,",
"board[C('h7')] = StubPiece(board, BLACK, C('h7')) bishop = self.get_bishop(board, WHITE, C('e4'))",
"WHITE, C('e4')) results = bishop.generate() expected = [C('d5')] correct =",
"C('c2')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected",
"C('h7')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self): board =",
"[C('f3'), C('g2'), C('h1')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self):",
"C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected",
"def test_generate_bottomleft(self): board = StubBoard() board[C('c2')] = StubPiece(board, BLACK, C('c2'))",
"and e[1] == r[1]: compared.append(True) break else: compared.append(False) return compared",
"[C('d3'), C('c2')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('b1')]",
"C('e4')) results = bishop.generate() expected = [C('f5'), C('g6'), C('h7')] correct",
"board[C('c2')] = StubPiece(board, BLACK, C('c2')) bishop = self.get_bishop(board, WHITE, C('e4'))",
"= self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self): board = StubBoard() bishop",
"expected, results): compared = [] for e in expected: for",
"WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 13) board = StubBoard()",
"self.assertTrue(all(correct)) def test_generate_amount(self): board = StubBoard() bishop = self.get_bishop(board, WHITE,",
"bishop.generate() expected = [C('d5')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected",
"== r[0] and e[1] == r[1]: compared.append(True) break else: compared.append(False)",
"= self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('c6')] correct = self.compare_list(expected,",
".helpers import StubBoard, StubPiece, C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def",
"C('c2')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('b1')] correct",
"= self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('b1')] correct = self.compare_list(expected,",
"BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team, position): from chess.models",
"def test_generate_bottomright(self): board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4'))",
"self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 13) board =",
"results: if e[0] == r[0] and e[1] == r[1]: compared.append(True)",
"results = bishop.generate() expected = [C('f3'), C('g2'), C('h1')] correct =",
"results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board = StubBoard() bishop = self.get_bishop(board,",
"StubBoard, StubPiece, C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board,",
"= self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board = StubBoard() bishop",
"C('e4')) results = bishop.generate() expected = [C('f3'), C('g2'), C('h1')] correct",
"test_generate_bottomleft(self): board = StubBoard() board[C('c2')] = StubPiece(board, BLACK, C('c2')) bishop",
"board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results =",
"test_generate_amount(self): board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results",
"unittest from .helpers import StubBoard, StubPiece, C, WHITE, BLACK class",
"self.assertFalse(any(correct)) def test_generate_bottomright(self): board = StubBoard() bishop = self.get_bishop(board, WHITE,",
"C('e4')) results = bishop.generate() self.assertEqual(len(results), 13) board = StubBoard() board[C('c6')]",
"e[1] == r[1]: compared.append(True) break else: compared.append(False) return compared def",
"board = StubBoard() board[C('c2')] = StubPiece(board, BLACK, C('c2')) bishop =",
"board = StubBoard() board[C('h7')] = StubPiece(board, BLACK, C('h7')) bishop =",
"import unittest from .helpers import StubBoard, StubPiece, C, WHITE, BLACK",
"for r in results: if e[0] == r[0] and e[1]",
"expected = [C('c6')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self):",
"self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self): board = StubBoard() board[C('c6')] =",
"correct = self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('b1')] correct =",
"self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board = StubBoard() bishop =",
"StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board, WHITE,",
"results): compared = [] for e in expected: for r",
"def test_generate_topright(self): board = StubBoard() board[C('h7')] = StubPiece(board, BLACK, C('h7'))",
"results) self.assertTrue(all(correct)) expected = [C('b1')] correct = self.compare_list(expected, results) self.assertFalse(any(correct))",
"results = bishop.generate() expected = [C('d5')] correct = self.compare_list(expected, results)",
"= [] for e in expected: for r in results:",
"results = bishop.generate() expected = [C('d3'), C('c2')] correct = self.compare_list(expected,",
"in results: if e[0] == r[0] and e[1] == r[1]:",
"WHITE, C('e4')) results = bishop.generate() expected = [C('f3'), C('g2'), C('h1')]",
"C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results),",
"self.assertFalse(any(correct)) def test_generate_bottomleft(self): board = StubBoard() board[C('c2')] = StubPiece(board, BLACK,",
"StubBoard() board[C('h7')] = StubPiece(board, BLACK, C('h7')) bishop = self.get_bishop(board, WHITE,",
"bishop.generate() expected = [C('d3'), C('c2')] correct = self.compare_list(expected, results) self.assertTrue(all(correct))",
"self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('d3'), C('c2')]",
"get_bishop(self, board, team, position): from chess.models import Bishop return Bishop(board,",
"[C('f5'), C('g6'), C('h7')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self):",
"WHITE, C('e4')) results = bishop.generate() expected = [C('f5'), C('g6'), C('h7')]",
"= StubBoard() board[C('h7')] = StubPiece(board, BLACK, C('h7')) bishop = self.get_bishop(board,",
"[] for e in expected: for r in results: if",
"C('g6'), C('h7')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self): board",
"= bishop.generate() expected = [C('f5'), C('g6'), C('h7')] correct = self.compare_list(expected,",
"[C('c6')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board =",
"StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board, WHITE, C('e4')) results =",
"if e[0] == r[0] and e[1] == r[1]: compared.append(True) break",
"StubPiece(board, BLACK, C('h7')) bishop = self.get_bishop(board, WHITE, C('e4')) results =",
"= self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_topleft(self): board = StubBoard() board[C('c6')]",
"C('g2'), C('h1')] correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self): board",
"position): from chess.models import Bishop return Bishop(board, team, position) def",
"board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board, WHITE, C('e4'))",
"== r[1]: compared.append(True) break else: compared.append(False) return compared def test_generate_topright(self):",
"bishop.generate() expected = [C('f5'), C('g6'), C('h7')] correct = self.compare_list(expected, results)",
"= StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate()",
"results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board = StubBoard() board[C('c2')] = StubPiece(board,",
"break else: compared.append(False) return compared def test_generate_topright(self): board = StubBoard()",
"self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('f3'), C('g2'),",
"bishop.generate() self.assertEqual(len(results), 13) board = StubBoard() board[C('c6')] = StubPiece(board, WHITE,",
"e[0] == r[0] and e[1] == r[1]: compared.append(True) break else:",
"correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board = StubBoard()",
"position) def compare_list(self, expected, results): compared = [] for e",
"compared.append(False) return compared def test_generate_topright(self): board = StubBoard() board[C('h7')] =",
"team, position): from chess.models import Bishop return Bishop(board, team, position)",
"self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('f5'), C('g6'),",
"test_generate_topleft(self): board = StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop",
"class TestBishopGenerate(unittest.TestCase): def get_bishop(self, board, team, position): from chess.models import",
"from .helpers import StubBoard, StubPiece, C, WHITE, BLACK class TestBishopGenerate(unittest.TestCase):",
"else: compared.append(False) return compared def test_generate_topright(self): board = StubBoard() board[C('h7')]",
"= StubPiece(board, BLACK, C('h7')) bishop = self.get_bishop(board, WHITE, C('e4')) results",
"def compare_list(self, expected, results): compared = [] for e in",
"= self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('d5')]",
"= self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() expected = [C('f5'),",
"results) self.assertTrue(all(correct)) expected = [C('c6')] correct = self.compare_list(expected, results) self.assertFalse(any(correct))",
"self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board = StubBoard() board[C('c2')] =",
"e in expected: for r in results: if e[0] ==",
"compare_list(self, expected, results): compared = [] for e in expected:",
"return compared def test_generate_topright(self): board = StubBoard() board[C('h7')] = StubPiece(board,",
"= bishop.generate() expected = [C('d3'), C('c2')] correct = self.compare_list(expected, results)",
"from chess.models import Bishop return Bishop(board, team, position) def compare_list(self,",
"self.compare_list(expected, results) self.assertTrue(all(correct)) expected = [C('b1')] correct = self.compare_list(expected, results)",
"expected: for r in results: if e[0] == r[0] and",
"self.assertTrue(all(correct)) expected = [C('b1')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def",
"= [C('c6')] correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomleft(self): board",
"correct = self.compare_list(expected, results) self.assertTrue(all(correct)) def test_generate_amount(self): board = StubBoard()",
"in expected: for r in results: if e[0] == r[0]",
"test_generate_bottomright(self): board = StubBoard() bishop = self.get_bishop(board, WHITE, C('e4')) results",
"chess.models import Bishop return Bishop(board, team, position) def compare_list(self, expected,",
"results) self.assertTrue(all(correct)) def test_generate_topleft(self): board = StubBoard() board[C('c6')] = StubPiece(board,",
"correct = self.compare_list(expected, results) self.assertFalse(any(correct)) def test_generate_bottomright(self): board = StubBoard()",
"BLACK, C('c2')) bishop = self.get_bishop(board, WHITE, C('e4')) results = bishop.generate()",
"self.get_bishop(board, WHITE, C('e4')) results = bishop.generate() self.assertEqual(len(results), 10) if __name__",
"team, position) def compare_list(self, expected, results): compared = [] for",
"= StubBoard() board[C('c6')] = StubPiece(board, WHITE, C('c6')) bishop = self.get_bishop(board,"
] |
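Because the windows in the record above overlap, the unittest module they were sampled from can be pieced back together. The sketch below is that reassembly, abridged: the relative import `.helpers`, the `chess.models.Bishop` class and the stub types are names quoted in the fragments and are assumed to exist in the original project; the three omitted directional tests follow the same pattern as the one shown.

```python
# Sketch reassembled from the overlapping windows above; `.helpers` and
# `chess.models` are paths quoted in the fragments, not modules provided here.
import unittest
from .helpers import StubBoard, StubPiece, C, WHITE, BLACK


class TestBishopGenerate(unittest.TestCase):
    def get_bishop(self, board, team, position):
        from chess.models import Bishop
        return Bishop(board, team, position)

    def compare_list(self, expected, results):
        # For each expected square, record True if any generated move matches it.
        compared = []
        for e in expected:
            for r in results:
                if e[0] == r[0] and e[1] == r[1]:
                    compared.append(True)
                    break
            else:
                compared.append(False)
        return compared

    def test_generate_topright(self):
        # A black piece on h7 can be captured, so f5, g6 and h7 must all be generated.
        board = StubBoard()
        board[C('h7')] = StubPiece(board, BLACK, C('h7'))
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()
        expected = [C('f5'), C('g6'), C('h7')]
        correct = self.compare_list(expected, results)
        self.assertTrue(all(correct))

    # test_generate_topleft, test_generate_bottomleft and test_generate_bottomright
    # appear in the fragments with the same structure for the other three diagonals.

    def test_generate_amount(self):
        # 13 moves from e4 on an empty board; 10 once a friendly piece blocks c6.
        board = StubBoard()
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()
        self.assertEqual(len(results), 13)
        board = StubBoard()
        board[C('c6')] = StubPiece(board, WHITE, C('c6'))
        bishop = self.get_bishop(board, WHITE, C('e4'))
        results = bishop.generate()
        self.assertEqual(len(results), 10)


if __name__ == '__main__':
    unittest.main()
```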
[
"lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING) def list_records(context, output): output = []",
"lib.fmd.namedentity import NamedEntity from lib.fmd.decorators import Action, ListStage, GetStage from",
"[] if hasattr(context, 'filter'): context.log.debug('Using filter [%s]' % context.filter) entries",
"from lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING) def list_records(context, output): output =",
"[%s]' % context.filter) entries = context.ddb.list(context.filter) else: entries = context.ddb.list()",
"if hasattr(context, 'filter'): context.log.debug('Using filter [%s]' % context.filter) entries =",
"def list_records(context, output): output = [] if hasattr(context, 'filter'): context.log.debug('Using",
"hasattr(context, 'filter'): context.log.debug('Using filter [%s]' % context.filter) entries = context.ddb.list(context.filter)",
"Action, ListStage, GetStage from lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING) def list_records(context,",
"context.filter) entries = context.ddb.list(context.filter) else: entries = context.ddb.list() return NamedEntity('records',",
"import Action, ListStage, GetStage from lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING) def",
"output = [] if hasattr(context, 'filter'): context.log.debug('Using filter [%s]' %",
"filter [%s]' % context.filter) entries = context.ddb.list(context.filter) else: entries =",
"import NamedEntity from lib.fmd.decorators import Action, ListStage, GetStage from lib.exceptions.workflow",
"from lib.fmd.decorators import Action, ListStage, GetStage from lib.exceptions.workflow import EntryException",
"list_records(context, output): output = [] if hasattr(context, 'filter'): context.log.debug('Using filter",
"@Action(ListStage.DATAGATHERING) def list_records(context, output): output = [] if hasattr(context, 'filter'):",
"from lib.fmd.namedentity import NamedEntity from lib.fmd.decorators import Action, ListStage, GetStage",
"GetStage from lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING) def list_records(context, output): output",
"output): output = [] if hasattr(context, 'filter'): context.log.debug('Using filter [%s]'",
"context.log.debug('Using filter [%s]' % context.filter) entries = context.ddb.list(context.filter) else: entries",
"% context.filter) entries = context.ddb.list(context.filter) else: entries = context.ddb.list() return",
"NamedEntity from lib.fmd.decorators import Action, ListStage, GetStage from lib.exceptions.workflow import",
"entries = context.ddb.list(context.filter) else: entries = context.ddb.list() return NamedEntity('records', entries)",
"lib.fmd.decorators import Action, ListStage, GetStage from lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING)",
"ListStage, GetStage from lib.exceptions.workflow import EntryException @Action(ListStage.DATAGATHERING) def list_records(context, output):",
"EntryException @Action(ListStage.DATAGATHERING) def list_records(context, output): output = [] if hasattr(context,",
"= [] if hasattr(context, 'filter'): context.log.debug('Using filter [%s]' % context.filter)",
"import EntryException @Action(ListStage.DATAGATHERING) def list_records(context, output): output = [] if",
"'filter'): context.log.debug('Using filter [%s]' % context.filter) entries = context.ddb.list(context.filter) else:"
] |
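The record that just closed covers one small module completely, so it can be quoted back in full. The reassembly below is taken verbatim from the windows; the `lib.fmd.*` and `lib.exceptions.workflow` packages and the `context` object (`context.ddb`, `context.log`, `context.filter`) are names from the fragments, assumed to be supplied by the original project.

```python
# Full reassembly of the windows above; the lib.* packages and the `context`
# object are assumptions carried over from the sampled project, not defined here.
from lib.fmd.namedentity import NamedEntity
from lib.fmd.decorators import Action, ListStage, GetStage
from lib.exceptions.workflow import EntryException  # imported but unused in the fragments


@Action(ListStage.DATAGATHERING)
def list_records(context, output):
    output = []
    if hasattr(context, 'filter'):
        context.log.debug('Using filter [%s]' % context.filter)
        entries = context.ddb.list(context.filter)
    else:
        entries = context.ddb.list()
    return NamedEntity('records', entries)
```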
[
"each ' 'with exactly two indexes, where the first element",
"getattr(other, '__class__', None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name,",
"be some iterable object that provides `__len__` (such as a",
"and returns an `ActionResponse` object or raises an `ActionError`. `switch_to_action_map`",
"code-block:: python class UserActionV1(Action): ... class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction):",
"not object and ( not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__')",
"base action callable) without instantiating/calling it, based on the switches",
"or list) and have exactly two elements. For each item",
"length that won't raise an error on values that don't",
"on the switches enabled in the request, initializes the action",
"is an action ' '(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass) class",
"also be a valid switch, and it would still be",
"a switch that provides an attribute `value` which, itself, provides",
"performing this validation every time the action is called. \"\"\"",
"item as a default, and its switch could simply be",
"the second element (the action) of that item will be",
"the raw action (such as the action class or the",
"object and returns a new callable that, itself, accepts an",
"None default_action = None for switch, action in self.switch_to_action_map: if",
"indexable objects that provide `__len__` (such as a tuple [recommended]",
"an iterable of at least two indexable items, each '",
"it could also be a valid switch, and it would",
"`SwitchedAction` will iterate over that list, checking the first element",
"\"\"\" A specialized action that defers to other, concrete actions",
"or not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) < 2 or any(",
"an `ActionError`. `switch_to_action_map` must have at least two items in",
"@six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\" A specialized action that defers to",
"should not override this. :param settings: The server settings object",
"item will be deferred to. If it finds no items",
"class UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), )",
"request, or the default raw action if no switches were",
"!= 2 or not is_switch(i[0]) or not callable(i[1]) ) ):",
"'__len__', lambda *_: -1)() class _DefaultAction(object): def __int__(self): d =",
"(such as the action class or the base action callable)",
"switches matched. :param action_request: The request object :type action_request: EnrichedActionRequest",
"__new__(mcs, name, bases, body): \"\"\" Validate the switch_to_action_map when the",
"it. `SwitchedAction` will iterate over that list, checking the first",
"action) of that item will be deferred to. If it",
"`__int__` (such as an actual integer) or a switch that",
"that item will be deferred to. If it finds no",
"items whose switches are enabled, it will use the very",
"last item as a default, and its switch could simply",
"_DefaultAction(object): def __int__(self): d = id(self) return d if d",
"treated as the default in the case that no other",
"_DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases, body): \"\"\" Validate",
"are enabled, it will use the very last action in",
"callable(i[1]) ) ): raise ValueError( 'Class attribute switch_to_action_map must be",
"based on the switches in the action request, or the",
"which, itself, provides `__int__` (or is an int). The second",
"if it is enabled in the request. If it is,",
"the action with the server settings, and then calls the",
"Subclasses must not override any methods and must override `switch_to_action_map`.",
"`switch_to_action_map`. As such, you can treat the last item as",
"accepts an `ActionRequest` object and returns an `ActionResponse` object or",
"in it. `SwitchedAction` will iterate over that list, checking the",
"\"\"\" Get the raw action (such as the action class",
"'SwitchedAction', ) def _len(item): # Safe length that won't raise",
"if self.__class__ is SwitchedAction: raise TypeError('Cannot instantiate abstract SwitchedAction') self.settings",
"exactly two indexes, where the first element is a switch",
"based on the switches enabled in the request, initializes the",
"first element is a switch and the second element is",
"the server settings, and then calls the action with the",
"def _len(item): # Safe length that won't raise an error",
"and must override `switch_to_action_map`. `switch_to_action_map` should be some iterable object",
"in cls.switch_to_action_map if not hasattr(i, '__getitem__') or _len(i) != 2",
"import) and improves performance by not performing this validation every",
"not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) < 2",
"= None default_action = None for switch, action in self.switch_to_action_map:",
"one that extends `Action`) or any callable that accepts a",
"actual integer) or a switch that provides an attribute `value`",
"the `Server` (or potentially from tests). Finds the appropriate real",
"action :rtype: callable \"\"\" last_action = None matched_action = None",
"_len(cls.switch_to_action_map) < 2 or any( True for i in cls.switch_to_action_map",
"self.switch_to_action_map: if switch == self.DEFAULT_ACTION: default_action = action elif switch",
"\"\"\" cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body) # noinspection",
"'__class__', None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases,",
"_SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases, body): \"\"\" Validate the switch_to_action_map",
"item to see if it is enabled in the request.",
"Finds the appropriate real action to invoke based on the",
"`switch_to_action_map` should be some iterable object that provides `__len__` (such",
"UserActionV1(Action): ... class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map = (",
"action with the request object, returning its response directly. :param",
"[recommended] or list) and have exactly two elements. For each",
"UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION,",
"'__iter__') or _len(cls.switch_to_action_map) < 2 or any( True for i",
"be a switch that provides `__int__` (such as an actual",
"a valid switch, and it would still be treated as",
"then calls the action with the request object, returning its",
"is_switch __all__ = ( 'SwitchedAction', ) def _len(item): # Safe",
"object that provides `__len__` (such as a tuple [recommended] or",
"first element (switch) of each item to see if it",
"cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body) # noinspection PyUnresolvedReferences",
"import ( absolute_import, unicode_literals, ) import abc import six from",
"of each item to see if it is enabled in",
"object :type settings: dict \"\"\" if self.__class__ is SwitchedAction: raise",
"is SwitchedAction: raise TypeError('Cannot instantiate abstract SwitchedAction') self.settings = settings",
"else -d def __eq__(self, other): return getattr(other, '__class__', None) ==",
"a tuple [recommended] or list). Its items must be indexable",
"a server settings object and returns a new callable that,",
"The request object :type action_request: EnrichedActionRequest :return: The response object",
"Construct a new action. Concrete classes should not override this.",
"it every time the class is instantiated. This identifies problems",
"items in it. `SwitchedAction` will iterate over that list, checking",
"items matched). Example usage: .. code-block:: python class UserActionV1(Action): ...",
"itself, provides `__int__` (or is an int). The second element",
"action to invoke based on the switches enabled in the",
"d = id(self) return d if d < 0 else",
"0 else -d def __eq__(self, other): return getattr(other, '__class__', None)",
"and then calls the action with the request object, returning",
"if bases[0] is not object and ( not cls.switch_to_action_map or",
":param settings: The server settings object :type settings: dict \"\"\"",
"2 or any( True for i in cls.switch_to_action_map if not",
"unicode_literals, ) import abc import six from pysoa.server.internal.types import is_switch",
"`__len__` (such as a tuple [recommended] or list) and have",
"that list, checking the first element (switch) of each item",
"checking the first element (switch) of each item to see",
"required: it could also be a valid switch, and it",
"elif switch and action_request.switches.is_active(switch): matched_action = action break else: last_action",
"bases, body) # noinspection PyUnresolvedReferences if bases[0] is not object",
"__future__ import ( absolute_import, unicode_literals, ) import abc import six",
"two items in it. `SwitchedAction` will iterate over that list,",
"it would still be treated as the default in the",
"to invoke based on the switches enabled in the request,",
"def __init__(self, settings=None): \"\"\" Construct a new action. Concrete classes",
"on the switches in the action request, or the default",
"or any( True for i in cls.switch_to_action_map if not hasattr(i,",
"usage: .. code-block:: python class UserActionV1(Action): ... class UserActionV2(Action): ...",
"bases[0] is not object and ( not cls.switch_to_action_map or not",
"request switches. Subclasses must not override any methods and must",
"or list). Its items must be indexable objects that provide",
"it is enabled in the request. If it is, the",
"that defers to other, concrete actions based on request switches.",
"override `switch_to_action_map`. `switch_to_action_map` should be some iterable object that provides",
"two elements. For each item in `switch_to_action_map`, the first element",
"class is instantiated. This identifies problems earlier (on import) and",
"'(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\" A specialized",
"it finds no items whose switches are enabled, it will",
"matched). Example usage: .. code-block:: python class UserActionV1(Action): ... class",
"be deferred to. If it finds no items whose switches",
"not override this. :param settings: The server settings object :type",
"class UserActionV1(Action): ... class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map =",
"time the action is called. \"\"\" cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs,",
"(or is an int). The second element must be an",
"that don't support length return getattr(item, '__len__', lambda *_: -1)()",
"have at least two items in it. `SwitchedAction` will iterate",
"__init__(self, settings=None): \"\"\" Construct a new action. Concrete classes should",
"action callable) without instantiating/calling it, based on the switches in",
"(although, this is not required: it could also be a",
"created, instead of doing it every time the class is",
"not performing this validation every time the action is called.",
"server settings object and returns a new callable that, itself,",
"default_action or last_action def __call__(self, action_request): \"\"\" Main entry point",
"return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\" A specialized action that",
"and improves performance by not performing this validation every time",
"`ActionRequest` object and returns an `ActionResponse` object or raises an",
"class is created, instead of doing it every time the",
"see if it is enabled in the request. If it",
"' 'with exactly two indexes, where the first element is",
"other): return getattr(other, '__class__', None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def",
"return d if d < 0 else -d def __eq__(self,",
"return matched_action or default_action or last_action def __call__(self, action_request): \"\"\"",
"performance by not performing this validation every time the action",
"or the base action callable) without instantiating/calling it, based on",
"The response object :rtype: ActionResponse :raise: ActionError, ResponseValidationError \"\"\" return",
") ): raise ValueError( 'Class attribute switch_to_action_map must be an",
"def get_uninitialized_action(self, action_request): \"\"\" Get the raw action (such as",
"with the request object, returning its response directly. :param action_request:",
"is_switch(i[0]) or not callable(i[1]) ) ): raise ValueError( 'Class attribute",
"each item to see if it is enabled in the",
"matched_action = None default_action = None for switch, action in",
"default_action = None for switch, action in self.switch_to_action_map: if switch",
"action that defers to other, concrete actions based on request",
"enabled, it will use the very last action in `switch_to_action_map`.",
".. code-block:: python class UserActionV1(Action): ... class UserActionV2(Action): ... class",
"action break else: last_action = action return matched_action or default_action",
"__all__ = ( 'SwitchedAction', ) def _len(item): # Safe length",
"int). The second element must be an action, such as",
"action ' '(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\"",
"a default, and its switch could simply be `SwitchedAction.DEFAULT_ACTION` (although,",
"the first element is a switch and the second element",
"in the case that no other items matched). Example usage:",
"instantiating/calling it, based on the switches in the action request,",
"'with exactly two indexes, where the first element is a",
"DEFAULT_ACTION = _DefaultAction() switch_to_action_map = () def __init__(self, settings=None): \"\"\"",
"be `SwitchedAction.DEFAULT_ACTION` (although, this is not required: it could also",
"the action class or the base action callable) without instantiating/calling",
"callable \"\"\" last_action = None matched_action = None default_action =",
"self.__class__ is SwitchedAction: raise TypeError('Cannot instantiate abstract SwitchedAction') self.settings =",
"new action. Concrete classes should not override this. :param settings:",
"action in self.switch_to_action_map: if switch == self.DEFAULT_ACTION: default_action = action",
"that won't raise an error on values that don't support",
"be indexable objects that provide `__len__` (such as a tuple",
"def __new__(mcs, name, bases, body): \"\"\" Validate the switch_to_action_map when",
"matched. :param action_request: The request object :type action_request: EnrichedActionRequest :return:",
"None matched_action = None default_action = None for switch, action",
"action if no switches were present or no switches matched.",
":type settings: dict \"\"\" if self.__class__ is SwitchedAction: raise TypeError('Cannot",
"an action class (e.g. one that extends `Action`) or any",
"values that don't support length return getattr(item, '__len__', lambda *_:",
"have exactly two elements. For each item in `switch_to_action_map`, the",
"the action request, or the default raw action if no",
"cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) < 2 or",
"no other items matched). Example usage: .. code-block:: python class",
"items, each ' 'with exactly two indexes, where the first",
"action_request.switches.is_active(switch): matched_action = action break else: last_action = action return",
"The server settings object :type settings: dict \"\"\" if self.__class__",
"from tests). Finds the appropriate real action to invoke based",
"cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\" A specialized action that defers",
"returns a new callable that, itself, accepts an `ActionRequest` object",
"or any callable that accepts a server settings object and",
"response object :rtype: ActionResponse :raise: ActionError, ResponseValidationError \"\"\" return self.get_uninitialized_action(action_request)(self.settings)(action_request)",
"be an iterable of at least two indexable items, each",
"request object, returning its response directly. :param action_request: The request",
"2 or not is_switch(i[0]) or not callable(i[1]) ) ): raise",
"enabled in the request. If it is, the second element",
"support length return getattr(item, '__len__', lambda *_: -1)() class _DefaultAction(object):",
"provides `__int__` (such as an actual integer) or a switch",
"provides `__int__` (or is an int). The second element must",
"class (e.g. one that extends `Action`) or any callable that",
"switch == self.DEFAULT_ACTION: default_action = action elif switch and action_request.switches.is_active(switch):",
"last_action def __call__(self, action_request): \"\"\" Main entry point for actions",
"on values that don't support length return getattr(item, '__len__', lambda",
"the request. If it is, the second element (the action)",
"-d def __eq__(self, other): return getattr(other, '__class__', None) == _DefaultAction",
"switch, and it would still be treated as the default",
"default raw action if no switches were present or no",
"don't support length return getattr(item, '__len__', lambda *_: -1)() class",
"and returns a new callable that, itself, accepts an `ActionRequest`",
"of at least two indexable items, each ' 'with exactly",
"class SwitchedAction(object): \"\"\" A specialized action that defers to other,",
"the switches in the action request, or the default raw",
"element (the action) of that item will be deferred to.",
"the action is called. \"\"\" cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name,",
"element is a switch and the second element is an",
"(SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\" DEFAULT_ACTION = _DefaultAction() switch_to_action_map = ()",
"could simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not required: it",
"concrete actions based on request switches. Subclasses must not override",
"<gh_stars>0 from __future__ import ( absolute_import, unicode_literals, ) import abc",
"\"\"\" Construct a new action. Concrete classes should not override",
"the request object, returning its response directly. :param action_request: The",
"still be treated as the default in the case that",
"an attribute `value` which, itself, provides `__int__` (or is an",
"_len(item): # Safe length that won't raise an error on",
"of doing it every time the class is instantiated. This",
"PyUnresolvedReferences if bases[0] is not object and ( not cls.switch_to_action_map",
"not hasattr(i, '__getitem__') or _len(i) != 2 or not is_switch(i[0])",
"-1)() class _DefaultAction(object): def __int__(self): d = id(self) return d",
"object or raises an `ActionError`. `switch_to_action_map` must have at least",
"no items whose switches are enabled, it will use the",
"be treated as the default in the case that no",
"a switch that provides `__int__` (such as an actual integer)",
"= id(self) return d if d < 0 else -d",
"is called. \"\"\" cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body)",
"if not hasattr(i, '__getitem__') or _len(i) != 2 or not",
"__int__(self): d = id(self) return d if d < 0",
"list) and have exactly two elements. For each item in",
"to see if it is enabled in the request. If",
"break else: last_action = action return matched_action or default_action or",
"UserActionV1), ) \"\"\" DEFAULT_ACTION = _DefaultAction() switch_to_action_map = () def",
"is not required: it could also be a valid switch,",
"' '(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\" A",
"matched_action = action break else: last_action = action return matched_action",
"real action to invoke based on the switches enabled in",
"the base action callable) without instantiating/calling it, based on the",
"the class is created, instead of doing it every time",
"class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2),",
"= ( 'SwitchedAction', ) def _len(item): # Safe length that",
"The second element must be an action, such as an",
"settings: The server settings object :type settings: dict \"\"\" if",
"import six from pysoa.server.internal.types import is_switch __all__ = ( 'SwitchedAction',",
"(such as a tuple [recommended] or list). Its items must",
"in the request, initializes the action with the server settings,",
"switches in the action request, or the default raw action",
"< 2 or any( True for i in cls.switch_to_action_map if",
"methods and must override `switch_to_action_map`. `switch_to_action_map` should be some iterable",
"or not is_switch(i[0]) or not callable(i[1]) ) ): raise ValueError(",
"If it is, the second element (the action) of that",
"provide `__len__` (such as a tuple [recommended] or list) and",
"an `ActionResponse` object or raises an `ActionError`. `switch_to_action_map` must have",
"raises an `ActionError`. `switch_to_action_map` must have at least two items",
":param action_request: The request object :type action_request: EnrichedActionRequest :return: The",
"that, itself, accepts an `ActionRequest` object and returns an `ActionResponse`",
"settings, and then calls the action with the request object,",
"d < 0 else -d def __eq__(self, other): return getattr(other,",
"appropriate real action to invoke based on the switches enabled",
"UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\" DEFAULT_ACTION = _DefaultAction() switch_to_action_map =",
"defers to other, concrete actions based on request switches. Subclasses",
"action with the server settings, and then calls the action",
"*_: -1)() class _DefaultAction(object): def __int__(self): d = id(self) return",
"this validation every time the action is called. \"\"\" cls",
"where the first element is a switch and the second",
"classes should not override this. :param settings: The server settings",
"an int). The second element must be an action, such",
"improves performance by not performing this validation every time the",
"server settings object :type settings: dict \"\"\" if self.__class__ is",
"None for switch, action in self.switch_to_action_map: if switch == self.DEFAULT_ACTION:",
"any( True for i in cls.switch_to_action_map if not hasattr(i, '__getitem__')",
":return: The response object :rtype: ActionResponse :raise: ActionError, ResponseValidationError \"\"\"",
"and the second element is an action ' '(callable).' )",
"list, checking the first element (switch) of each item to",
"actions from the `Server` (or potentially from tests). Finds the",
"override this. :param settings: The server settings object :type settings:",
"Example usage: .. code-block:: python class UserActionV1(Action): ... class UserActionV2(Action):",
"Main entry point for actions from the `Server` (or potentially",
"The action :rtype: callable \"\"\" last_action = None matched_action =",
"(on import) and improves performance by not performing this validation",
"'__getitem__') or _len(i) != 2 or not is_switch(i[0]) or not",
"not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) < 2 or any( True",
"length return getattr(item, '__len__', lambda *_: -1)() class _DefaultAction(object): def",
"action_request): \"\"\" Get the raw action (such as the action",
"or no switches matched. :param action_request: The request object :type",
"None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases, body):",
"... class UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1),",
"= None for switch, action in self.switch_to_action_map: if switch ==",
"in `switch_to_action_map`, the first element must be a switch that",
"time the class is instantiated. This identifies problems earlier (on",
"two indexable items, each ' 'with exactly two indexes, where",
"__call__(self, action_request): \"\"\" Main entry point for actions from the",
"that extends `Action`) or any callable that accepts a server",
"items must be indexable objects that provide `__len__` (such as",
"and action_request.switches.is_active(switch): matched_action = action break else: last_action = action",
"an error on values that don't support length return getattr(item,",
"d if d < 0 else -d def __eq__(self, other):",
"problems earlier (on import) and improves performance by not performing",
"at least two items in it. `SwitchedAction` will iterate over",
"as the action class or the base action callable) without",
"that provides `__int__` (such as an actual integer) or a",
"For each item in `switch_to_action_map`, the first element must be",
"element is an action ' '(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass)",
"no switches were present or no switches matched. :param action_request:",
"action in `switch_to_action_map`. As such, you can treat the last",
"object and returns an `ActionResponse` object or raises an `ActionError`.",
"= _DefaultAction() switch_to_action_map = () def __init__(self, settings=None): \"\"\" Construct",
"switch_to_action_map when the class is created, instead of doing it",
"action class (e.g. one that extends `Action`) or any callable",
"Get the raw action (such as the action class or",
"last_action = None matched_action = None default_action = None for",
"is not object and ( not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map,",
"\"\"\" Main entry point for actions from the `Server` (or",
"return getattr(other, '__class__', None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs,",
"self.DEFAULT_ACTION: default_action = action elif switch and action_request.switches.is_active(switch): matched_action =",
":type action_request: EnrichedActionRequest :return: The action :rtype: callable \"\"\" last_action",
"it is, the second element (the action) of that item",
"the appropriate real action to invoke based on the switches",
"provides an attribute `value` which, itself, provides `__int__` (or is",
"valid switch, and it would still be treated as the",
"simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not required: it could",
"= action return matched_action or default_action or last_action def __call__(self,",
"(e.g. one that extends `Action`) or any callable that accepts",
"won't raise an error on values that don't support length",
"`ActionResponse` object or raises an `ActionError`. `switch_to_action_map` must have at",
"action (such as the action class or the base action",
"other items matched). Example usage: .. code-block:: python class UserActionV1(Action):",
"of that item will be deferred to. If it finds",
"callable that accepts a server settings object and returns a",
"calls the action with the request object, returning its response",
"UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\"",
"(such as an actual integer) or a switch that provides",
"tests). Finds the appropriate real action to invoke based on",
"and its switch could simply be `SwitchedAction.DEFAULT_ACTION` (although, this is",
"whose switches are enabled, it will use the very last",
"second element is an action ' '(callable).' ) return cls",
"# Safe length that won't raise an error on values",
"raise ValueError( 'Class attribute switch_to_action_map must be an iterable of",
"getattr(item, '__len__', lambda *_: -1)() class _DefaultAction(object): def __int__(self): d",
"the class is instantiated. This identifies problems earlier (on import)",
"and have exactly two elements. For each item in `switch_to_action_map`,",
"that provide `__len__` (such as a tuple [recommended] or list)",
"initializes the action with the server settings, and then calls",
"object, returning its response directly. :param action_request: The request object",
"no switches matched. :param action_request: The request object :type action_request:",
"`__len__` (such as a tuple [recommended] or list). Its items",
"body) # noinspection PyUnresolvedReferences if bases[0] is not object and",
"attribute switch_to_action_map must be an iterable of at least two",
"(switch) of each item to see if it is enabled",
"settings: dict \"\"\" if self.__class__ is SwitchedAction: raise TypeError('Cannot instantiate",
"to other, concrete actions based on request switches. Subclasses must",
"Validate the switch_to_action_map when the class is created, instead of",
"its switch could simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not",
"present or no switches matched. :param action_request: The request object",
"as a default, and its switch could simply be `SwitchedAction.DEFAULT_ACTION`",
"must be indexable objects that provide `__len__` (such as a",
"... class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map = ( (USER_VERSION_2_ENABLED,",
"the default raw action if no switches were present or",
"default, and its switch could simply be `SwitchedAction.DEFAULT_ACTION` (although, this",
"this is not required: it could also be a valid",
"switch that provides `__int__` (such as an actual integer) or",
"every time the class is instantiated. This identifies problems earlier",
"(or potentially from tests). Finds the appropriate real action to",
"not is_switch(i[0]) or not callable(i[1]) ) ): raise ValueError( 'Class",
"the first element must be a switch that provides `__int__`",
"or _len(i) != 2 or not is_switch(i[0]) or not callable(i[1])",
"request object :type action_request: EnrichedActionRequest :return: The action :rtype: callable",
"must be an iterable of at least two indexable items,",
"class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases, body): \"\"\" Validate the",
"error on values that don't support length return getattr(item, '__len__',",
"would still be treated as the default in the case",
"callable) without instantiating/calling it, based on the switches in the",
"the case that no other items matched). Example usage: ..",
"EnrichedActionRequest :return: The action :rtype: callable \"\"\" last_action = None",
"server settings, and then calls the action with the request",
"when the class is created, instead of doing it every",
"object :type action_request: EnrichedActionRequest :return: The response object :rtype: ActionResponse",
"switch and action_request.switches.is_active(switch): matched_action = action break else: last_action =",
"second element must be an action, such as an action",
"deferred to. If it finds no items whose switches are",
"a new callable that, itself, accepts an `ActionRequest` object and",
"action_request: EnrichedActionRequest :return: The response object :rtype: ActionResponse :raise: ActionError,",
"(such as a tuple [recommended] or list) and have exactly",
"mcs).__new__(mcs, name, bases, body) # noinspection PyUnresolvedReferences if bases[0] is",
"indexes, where the first element is a switch and the",
"objects that provide `__len__` (such as a tuple [recommended] or",
"in the action request, or the default raw action if",
"default_action = action elif switch and action_request.switches.is_active(switch): matched_action = action",
"( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\" DEFAULT_ACTION = _DefaultAction()",
"must be a switch that provides `__int__` (such as an",
"(the action) of that item will be deferred to. If",
"return getattr(item, '__len__', lambda *_: -1)() class _DefaultAction(object): def __int__(self):",
"action. Concrete classes should not override this. :param settings: The",
"request, initializes the action with the server settings, and then",
"object and ( not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__') or",
"= action elif switch and action_request.switches.is_active(switch): matched_action = action break",
"is instantiated. This identifies problems earlier (on import) and improves",
"instantiate abstract SwitchedAction') self.settings = settings def get_uninitialized_action(self, action_request): \"\"\"",
"raise an error on values that don't support length return",
"`switch_to_action_map` must have at least two items in it. `SwitchedAction`",
"\"\"\" DEFAULT_ACTION = _DefaultAction() switch_to_action_map = () def __init__(self, settings=None):",
"= super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body) # noinspection PyUnresolvedReferences if",
"= None matched_action = None default_action = None for switch,",
"from the `Server` (or potentially from tests). Finds the appropriate",
"over that list, checking the first element (switch) of each",
"is created, instead of doing it every time the class",
") return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object): \"\"\" A specialized action",
"provides `__len__` (such as a tuple [recommended] or list). Its",
"`__int__` (or is an int). The second element must be",
"hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) < 2 or any( True for",
"== _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta): def __new__(mcs, name, bases, body): \"\"\"",
"__eq__(self, other): return getattr(other, '__class__', None) == _DefaultAction class _SwitchedActionMetaClass(abc.ABCMeta):",
"class or the base action callable) without instantiating/calling it, based",
"def __int__(self): d = id(self) return d if d <",
"get_uninitialized_action(self, action_request): \"\"\" Get the raw action (such as the",
"settings def get_uninitialized_action(self, action_request): \"\"\" Get the raw action (such",
"Concrete classes should not override this. :param settings: The server",
"is a switch and the second element is an action",
"matched_action or default_action or last_action def __call__(self, action_request): \"\"\" Main",
"for i in cls.switch_to_action_map if not hasattr(i, '__getitem__') or _len(i)",
"such as an action class (e.g. one that extends `Action`)",
"that provides an attribute `value` which, itself, provides `__int__` (or",
"is an int). The second element must be an action,",
"be an action, such as an action class (e.g. one",
"accepts a server settings object and returns a new callable",
"iterable of at least two indexable items, each ' 'with",
"the very last action in `switch_to_action_map`. As such, you can",
"`switch_to_action_map`, the first element must be a switch that provides",
"as an action class (e.g. one that extends `Action`) or",
"the request, initializes the action with the server settings, and",
"# noinspection PyUnresolvedReferences if bases[0] is not object and (",
"returning its response directly. :param action_request: The request object :type",
"doing it every time the class is instantiated. This identifies",
"or a switch that provides an attribute `value` which, itself,",
"absolute_import, unicode_literals, ) import abc import six from pysoa.server.internal.types import",
"in `switch_to_action_map`. As such, you can treat the last item",
"lambda *_: -1)() class _DefaultAction(object): def __int__(self): d = id(self)",
"any callable that accepts a server settings object and returns",
"[recommended] or list). Its items must be indexable objects that",
"= action break else: last_action = action return matched_action or",
"every time the action is called. \"\"\" cls = super(_SwitchedActionMetaClass,",
"True for i in cls.switch_to_action_map if not hasattr(i, '__getitem__') or",
"should be some iterable object that provides `__len__` (such as",
"or _len(cls.switch_to_action_map) < 2 or any( True for i in",
"action return matched_action or default_action or last_action def __call__(self, action_request):",
"not callable(i[1]) ) ): raise ValueError( 'Class attribute switch_to_action_map must",
"as the default in the case that no other items",
"= () def __init__(self, settings=None): \"\"\" Construct a new action.",
"\"\"\" Validate the switch_to_action_map when the class is created, instead",
"it, based on the switches in the action request, or",
"This identifies problems earlier (on import) and improves performance by",
"= settings def get_uninitialized_action(self, action_request): \"\"\" Get the raw action",
"if d < 0 else -d def __eq__(self, other): return",
"six from pysoa.server.internal.types import is_switch __all__ = ( 'SwitchedAction', )",
"earlier (on import) and improves performance by not performing this",
"from pysoa.server.internal.types import is_switch __all__ = ( 'SwitchedAction', ) def",
"switches are enabled, it will use the very last action",
"switches. Subclasses must not override any methods and must override",
"the switch_to_action_map when the class is created, instead of doing",
"or the default raw action if no switches were present",
"switches enabled in the request, initializes the action with the",
"raw action if no switches were present or no switches",
"switch that provides an attribute `value` which, itself, provides `__int__`",
"from __future__ import ( absolute_import, unicode_literals, ) import abc import",
"settings object :type settings: dict \"\"\" if self.__class__ is SwitchedAction:",
"abc import six from pysoa.server.internal.types import is_switch __all__ = (",
"import is_switch __all__ = ( 'SwitchedAction', ) def _len(item): #",
"action, such as an action class (e.g. one that extends",
"indexable items, each ' 'with exactly two indexes, where the",
"\"\"\" if self.__class__ is SwitchedAction: raise TypeError('Cannot instantiate abstract SwitchedAction')",
"if switch == self.DEFAULT_ACTION: default_action = action elif switch and",
"could also be a valid switch, and it would still",
"not required: it could also be a valid switch, and",
"specialized action that defers to other, concrete actions based on",
"on request switches. Subclasses must not override any methods and",
"an action ' '(callable).' ) return cls @six.add_metaclass(_SwitchedActionMetaClass) class SwitchedAction(object):",
"two indexes, where the first element is a switch and",
"< 0 else -d def __eq__(self, other): return getattr(other, '__class__',",
"called. \"\"\" cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body) #",
"object :type action_request: EnrichedActionRequest :return: The action :rtype: callable \"\"\"",
"it will use the very last action in `switch_to_action_map`. As",
"must not override any methods and must override `switch_to_action_map`. `switch_to_action_map`",
"`value` which, itself, provides `__int__` (or is an int). The",
"is enabled in the request. If it is, the second",
"switch, action in self.switch_to_action_map: if switch == self.DEFAULT_ACTION: default_action =",
"item in `switch_to_action_map`, the first element must be a switch",
"element must be an action, such as an action class",
"action_request: The request object :type action_request: EnrichedActionRequest :return: The response",
"def __eq__(self, other): return getattr(other, '__class__', None) == _DefaultAction class",
"must have at least two items in it. `SwitchedAction` will",
"import abc import six from pysoa.server.internal.types import is_switch __all__ =",
"by not performing this validation every time the action is",
"extends `Action`) or any callable that accepts a server settings",
"request. If it is, the second element (the action) of",
"'Class attribute switch_to_action_map must be an iterable of at least",
"for actions from the `Server` (or potentially from tests). Finds",
"switch_to_action_map = ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\" DEFAULT_ACTION",
"treat the last item as a default, and its switch",
"enabled in the request, initializes the action with the server",
"for switch, action in self.switch_to_action_map: if switch == self.DEFAULT_ACTION: default_action",
"): raise ValueError( 'Class attribute switch_to_action_map must be an iterable",
"returns an `ActionResponse` object or raises an `ActionError`. `switch_to_action_map` must",
"use the very last action in `switch_to_action_map`. As such, you",
"the last item as a default, and its switch could",
"list). Its items must be indexable objects that provide `__len__`",
"will be deferred to. If it finds no items whose",
"super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases, body) # noinspection PyUnresolvedReferences if bases[0]",
"callable that, itself, accepts an `ActionRequest` object and returns an",
"its response directly. :param action_request: The request object :type action_request:",
"SwitchedAction') self.settings = settings def get_uninitialized_action(self, action_request): \"\"\" Get the",
"action is called. \"\"\" cls = super(_SwitchedActionMetaClass, mcs).__new__(mcs, name, bases,",
"cls.switch_to_action_map if not hasattr(i, '__getitem__') or _len(i) != 2 or",
"each item in `switch_to_action_map`, the first element must be a",
"action_request: The request object :type action_request: EnrichedActionRequest :return: The action",
"as a tuple [recommended] or list). Its items must be",
"iterate over that list, checking the first element (switch) of",
"id(self) return d if d < 0 else -d def",
"i in cls.switch_to_action_map if not hasattr(i, '__getitem__') or _len(i) !=",
"As such, you can treat the last item as a",
"the first element (switch) of each item to see if",
"the second element is an action ' '(callable).' ) return",
"must override `switch_to_action_map`. `switch_to_action_map` should be some iterable object that",
"and ( not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map)",
"a switch and the second element is an action '",
"name, bases, body): \"\"\" Validate the switch_to_action_map when the class",
"such, you can treat the last item as a default,",
"last action in `switch_to_action_map`. As such, you can treat the",
"last_action = action return matched_action or default_action or last_action def",
"you can treat the last item as a default, and",
"can treat the last item as a default, and its",
"SwitchedAction: raise TypeError('Cannot instantiate abstract SwitchedAction') self.settings = settings def",
"identifies problems earlier (on import) and improves performance by not",
"EnrichedActionRequest :return: The response object :rtype: ActionResponse :raise: ActionError, ResponseValidationError",
"new callable that, itself, accepts an `ActionRequest` object and returns",
"_len(i) != 2 or not is_switch(i[0]) or not callable(i[1]) )",
"`Action`) or any callable that accepts a server settings object",
"elements. For each item in `switch_to_action_map`, the first element must",
"without instantiating/calling it, based on the switches in the action",
"in self.switch_to_action_map: if switch == self.DEFAULT_ACTION: default_action = action elif",
"other, concrete actions based on request switches. Subclasses must not",
"not override any methods and must override `switch_to_action_map`. `switch_to_action_map` should",
"to. If it finds no items whose switches are enabled,",
"bases, body): \"\"\" Validate the switch_to_action_map when the class is",
"second element (the action) of that item will be deferred",
"(USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\" DEFAULT_ACTION = _DefaultAction() switch_to_action_map",
"Its items must be indexable objects that provide `__len__` (such",
"that no other items matched). Example usage: .. code-block:: python",
"will use the very last action in `switch_to_action_map`. As such,",
"as an actual integer) or a switch that provides an",
"invoke based on the switches enabled in the request, initializes",
"self.settings = settings def get_uninitialized_action(self, action_request): \"\"\" Get the raw",
"( not cls.switch_to_action_map or not hasattr(cls.switch_to_action_map, '__iter__') or _len(cls.switch_to_action_map) <",
"== self.DEFAULT_ACTION: default_action = action elif switch and action_request.switches.is_active(switch): matched_action",
"in the request. If it is, the second element (the",
"`switch_to_action_map`. `switch_to_action_map` should be some iterable object that provides `__len__`",
":return: The action :rtype: callable \"\"\" last_action = None matched_action",
"override any methods and must override `switch_to_action_map`. `switch_to_action_map` should be",
"actions based on request switches. Subclasses must not override any",
"point for actions from the `Server` (or potentially from tests).",
"class _DefaultAction(object): def __int__(self): d = id(self) return d if",
":rtype: callable \"\"\" last_action = None matched_action = None default_action",
"action elif switch and action_request.switches.is_active(switch): matched_action = action break else:",
"() def __init__(self, settings=None): \"\"\" Construct a new action. Concrete",
"at least two indexable items, each ' 'with exactly two",
"attribute `value` which, itself, provides `__int__` (or is an int).",
"`Server` (or potentially from tests). Finds the appropriate real action",
"else: last_action = action return matched_action or default_action or last_action",
"A specialized action that defers to other, concrete actions based",
"exactly two elements. For each item in `switch_to_action_map`, the first",
"or not callable(i[1]) ) ): raise ValueError( 'Class attribute switch_to_action_map",
"abstract SwitchedAction') self.settings = settings def get_uninitialized_action(self, action_request): \"\"\" Get",
"as a tuple [recommended] or list) and have exactly two",
"be a valid switch, and it would still be treated",
"the default in the case that no other items matched).",
"def __call__(self, action_request): \"\"\" Main entry point for actions from",
"the action with the request object, returning its response directly.",
"raw action (such as the action class or the base",
"element must be a switch that provides `__int__` (such as",
") def _len(item): # Safe length that won't raise an",
"least two indexable items, each ' 'with exactly two indexes,",
"or last_action def __call__(self, action_request): \"\"\" Main entry point for",
"raise TypeError('Cannot instantiate abstract SwitchedAction') self.settings = settings def get_uninitialized_action(self,",
"If it finds no items whose switches are enabled, it",
"or raises an `ActionError`. `switch_to_action_map` must have at least two",
"or default_action or last_action def __call__(self, action_request): \"\"\" Main entry",
"action class or the base action callable) without instantiating/calling it,",
"tuple [recommended] or list). Its items must be indexable objects",
"entry point for actions from the `Server` (or potentially from",
"very last action in `switch_to_action_map`. As such, you can treat",
"element (switch) of each item to see if it is",
"and it would still be treated as the default in",
"case that no other items matched). Example usage: .. code-block::",
"iterable object that provides `__len__` (such as a tuple [recommended]",
"switch_to_action_map = () def __init__(self, settings=None): \"\"\" Construct a new",
"dict \"\"\" if self.__class__ is SwitchedAction: raise TypeError('Cannot instantiate abstract",
"switch could simply be `SwitchedAction.DEFAULT_ACTION` (although, this is not required:",
"( 'SwitchedAction', ) def _len(item): # Safe length that won't",
"switches were present or no switches matched. :param action_request: The",
"is, the second element (the action) of that item will",
"action request, or the default raw action if no switches",
"instead of doing it every time the class is instantiated.",
"action_request): \"\"\" Main entry point for actions from the `Server`",
"tuple [recommended] or list) and have exactly two elements. For",
"must be an action, such as an action class (e.g.",
"integer) or a switch that provides an attribute `value` which,",
"an `ActionRequest` object and returns an `ActionResponse` object or raises",
"potentially from tests). Finds the appropriate real action to invoke",
") \"\"\" DEFAULT_ACTION = _DefaultAction() switch_to_action_map = () def __init__(self,",
"validation every time the action is called. \"\"\" cls =",
"\"\"\" last_action = None matched_action = None default_action = None",
"pysoa.server.internal.types import is_switch __all__ = ( 'SwitchedAction', ) def _len(item):",
"switch and the second element is an action ' '(callable).'",
"default in the case that no other items matched). Example",
"a tuple [recommended] or list) and have exactly two elements.",
"`SwitchedAction.DEFAULT_ACTION` (although, this is not required: it could also be",
"based on request switches. Subclasses must not override any methods",
"were present or no switches matched. :param action_request: The request",
"the switches enabled in the request, initializes the action with",
"TypeError('Cannot instantiate abstract SwitchedAction') self.settings = settings def get_uninitialized_action(self, action_request):",
"directly. :param action_request: The request object :type action_request: EnrichedActionRequest :return:",
"_DefaultAction() switch_to_action_map = () def __init__(self, settings=None): \"\"\" Construct a",
":type action_request: EnrichedActionRequest :return: The response object :rtype: ActionResponse :raise:",
"that accepts a server settings object and returns a new",
"ValueError( 'Class attribute switch_to_action_map must be an iterable of at",
"first element must be a switch that provides `__int__` (such",
"an action, such as an action class (e.g. one that",
"`ActionError`. `switch_to_action_map` must have at least two items in it.",
"this. :param settings: The server settings object :type settings: dict",
"noinspection PyUnresolvedReferences if bases[0] is not object and ( not",
"python class UserActionV1(Action): ... class UserActionV2(Action): ... class UserTransitionAction(SwitchedAction): switch_to_action_map",
"body): \"\"\" Validate the switch_to_action_map when the class is created,",
"itself, accepts an `ActionRequest` object and returns an `ActionResponse` object",
"The request object :type action_request: EnrichedActionRequest :return: The action :rtype:",
"Safe length that won't raise an error on values that",
"will iterate over that list, checking the first element (switch)",
"request object :type action_request: EnrichedActionRequest :return: The response object :rtype:",
"if no switches were present or no switches matched. :param",
") import abc import six from pysoa.server.internal.types import is_switch __all__",
"instantiated. This identifies problems earlier (on import) and improves performance",
"any methods and must override `switch_to_action_map`. `switch_to_action_map` should be some",
"a new action. Concrete classes should not override this. :param",
"switch_to_action_map must be an iterable of at least two indexable",
"( absolute_import, unicode_literals, ) import abc import six from pysoa.server.internal.types",
"settings object and returns a new callable that, itself, accepts",
"settings=None): \"\"\" Construct a new action. Concrete classes should not",
"some iterable object that provides `__len__` (such as a tuple",
"with the server settings, and then calls the action with",
"= ( (USER_VERSION_2_ENABLED, UserActionV2), (SwitchedAction.DEFAULT_ACTION, UserActionV1), ) \"\"\" DEFAULT_ACTION =",
"an actual integer) or a switch that provides an attribute",
"name, bases, body) # noinspection PyUnresolvedReferences if bases[0] is not",
"response directly. :param action_request: The request object :type action_request: EnrichedActionRequest",
"that provides `__len__` (such as a tuple [recommended] or list).",
"action_request: EnrichedActionRequest :return: The action :rtype: callable \"\"\" last_action =",
"hasattr(i, '__getitem__') or _len(i) != 2 or not is_switch(i[0]) or",
"finds no items whose switches are enabled, it will use",
"SwitchedAction(object): \"\"\" A specialized action that defers to other, concrete",
"least two items in it. `SwitchedAction` will iterate over that"
] |
[
"'', }, } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = { 'default':",
"= 'test' # SECURITY WARNING: don't run with debug turned",
"with debug turned on in production! DEBUG = True TEMPLATE_DEBUG",
"SECURITY WARNING: don't run with debug turned on in production!",
"= True TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['127.0.0.1'] DATABASES =",
"['127.0.0.1'] DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db',",
"run with debug turned on in production! DEBUG = True",
"ALLOWED_HOSTS = ['127.0.0.1'] DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3',",
"# SECURITY WARNING: don't run with debug turned on in",
"= 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine', },",
"SECRET_KEY = 'test' # SECURITY WARNING: don't run with debug",
"DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['127.0.0.1'] DATABASES",
"'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER': '', 'PASSWORD': '',",
"'', 'HOST': '', }, } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS =",
"}, } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = { 'default': {",
"'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER': '', 'PASSWORD': '', 'HOST': '', },",
"'test' # SECURITY WARNING: don't run with debug turned on",
"= True ALLOWED_HOSTS = ['127.0.0.1'] DATABASES = { 'default': {",
"'NAME': 'sqlite3.db', 'USER': '', 'PASSWORD': '', 'HOST': '', }, }",
".base import * SECRET_KEY = 'test' # SECURITY WARNING: don't",
"don't run with debug turned on in production! DEBUG =",
"'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine', }, }",
"from .base import * SECRET_KEY = 'test' # SECURITY WARNING:",
"debug turned on in production! DEBUG = True TEMPLATE_DEBUG =",
"'PASSWORD': '', 'HOST': '', }, } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS",
"WARNING: don't run with debug turned on in production! DEBUG",
"EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',",
"'HOST': '', }, } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = {",
"turned on in production! DEBUG = True TEMPLATE_DEBUG = True",
"in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS =",
"on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS",
"True ALLOWED_HOSTS = ['127.0.0.1'] DATABASES = { 'default': { 'ENGINE':",
"= { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER': '',",
"= ['127.0.0.1'] DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME':",
"'sqlite3.db', 'USER': '', 'PASSWORD': '', 'HOST': '', }, } EMAIL_BACKEND",
"DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER':",
"'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER': '', 'PASSWORD': '', 'HOST': '',",
"TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['127.0.0.1'] DATABASES = { 'default':",
"* SECRET_KEY = 'test' # SECURITY WARNING: don't run with",
"import * SECRET_KEY = 'test' # SECURITY WARNING: don't run",
"'USER': '', 'PASSWORD': '', 'HOST': '', }, } EMAIL_BACKEND =",
"{ 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER': '', 'PASSWORD': '', 'HOST':",
"True TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['127.0.0.1'] DATABASES = {",
"production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = ['127.0.0.1']",
"{ 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'sqlite3.db', 'USER': '', 'PASSWORD':",
"} EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE':",
"'', 'PASSWORD': '', 'HOST': '', }, } EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'"
] |
[
"bdserial) ioS = bs.ioStream(bserial) ioR = ioS.read() print('ioS data from",
"expensive # computation on one machine so that other machines",
"data = dict((i, i) for i in range(10)) bserial =",
"def testCloudPassageJSONVersion(): from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data",
"res = cc.push(data, title=title, asPickle=False) pulledObj = cc.pull(metaData='json') print('PulledObj', pulledObj,",
"0-8999, keys i*10' res = cc.push(data, title=title, asPickle=False) pulledObj =",
"example steps you through using resty & restAssured to save",
"= bs.serialize(data) jserial = js.serialize(data) bdserial = bs.deserialize(bserial) jdserial =",
"asPickle=True) print(rmTry) def testCloudPassageJSONVersion(): from entrails.cloudPassage import CloudPassageHandler cc =",
"print(rmTry) def main(): testSerializer() testCloudPassageJSONVersion() testCloudPassagePickledVersion() if __name__ == '__main__':",
"resty & restAssured to save pickled/serialized # data as a",
"= bs.deserialize(bserial) jdserial = js.deserialize(jserial) print('bdserial', bdserial) ioS = bs.ioStream(bserial)",
"in collaborative computing ie publish results from an expensive #",
"machine so that other machines can load it as live",
"CloudPassageHandler cc = CloudPassageHandler() data = dict((i, i*10) for i",
"on one machine so that other machines can load it",
"bs.ioStream(bserial) ioR = ioS.read() print('ioS data from the stream', ioR)",
"CloudPassageHandler() data = dict((str(i), i*10) for i in range(9)) title",
"i) for i in range(10)) bserial = bs.serialize(data) jserial =",
"rmTry = cc.removeTrace(data, asPickle=True) print(rmTry) def testCloudPassageJSONVersion(): from entrails.cloudPassage import",
"of items 0-8999, keys i*10' res = cc.push(data, title=title, asPickle=False)",
"<NAME> <<EMAIL>> # This example steps you through using resty",
"after deserialization. # Sample usage might be in collaborative computing",
"range(10)) bserial = bs.serialize(data) jserial = js.serialize(data) bdserial = bs.deserialize(bserial)",
"restAssured to save pickled/serialized # data as a blob and",
"= Serializer.BinarySerializer() js = Serializer.JSONSerializer() data = dict((i, i) for",
"print('bdserial', bdserial) ioS = bs.ioStream(bserial) ioR = ioS.read() print('ioS data",
"entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data = dict((str(i), i*10)",
"cc.removeTrace(data) print(rmTry) def main(): testSerializer() testCloudPassageJSONVersion() testCloudPassagePickledVersion() if __name__ ==",
"print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry = cc.removeTrace(data, asPickle=True)",
"0-8999, keys i*10' res = cc.push(data, title=title, asPickle=True) pulledObj =",
"== data) rmTry = cc.removeTrace(data) print(rmTry) def main(): testSerializer() testCloudPassageJSONVersion()",
"keys i*10' res = cc.push(data, title=title, asPickle=False) pulledObj = cc.pull(metaData='json')",
"i in range(10)) bserial = bs.serialize(data) jserial = js.serialize(data) bdserial",
"you through using resty & restAssured to save pickled/serialized #",
"a blob and then later re-using it in after deserialization.",
"data) rmTry = cc.removeTrace(data, asPickle=True) print(rmTry) def testCloudPassageJSONVersion(): from entrails.cloudPassage",
"<<EMAIL>> # This example steps you through using resty &",
"# This example steps you through using resty & restAssured",
"dict((i, i*10) for i in range(9)) title = 'Dict of",
"it in after deserialization. # Sample usage might be in",
"entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data = dict((i, i*10)",
"This example steps you through using resty & restAssured to",
"# Sample usage might be in collaborative computing ie publish",
"i in range(9)) title = 'Dict of items 0-8999, keys",
"then later re-using it in after deserialization. # Sample usage",
"bserial = bs.serialize(data) jserial = js.serialize(data) bdserial = bs.deserialize(bserial) jdserial",
"CloudPassageHandler cc = CloudPassageHandler() data = dict((str(i), i*10) for i",
"title = 'Dict of items 0-8999, keys i*10' res =",
"other machines can load it as live data. def testSerializer():",
"data = dict((i, i*10) for i in range(9)) title =",
"= cc.removeTrace(data, asPickle=True) print(rmTry) def testCloudPassageJSONVersion(): from entrails.cloudPassage import CloudPassageHandler",
"Author: <NAME> <<EMAIL>> # This example steps you through using",
"= cc.push(data, title=title, asPickle=False) pulledObj = cc.pull(metaData='json') print('PulledObj', pulledObj, data)",
"js.deserialize(jserial) print('bdserial', bdserial) ioS = bs.ioStream(bserial) ioR = ioS.read() print('ioS",
"def main(): testSerializer() testCloudPassageJSONVersion() testCloudPassagePickledVersion() if __name__ == '__main__': main()",
"& restAssured to save pickled/serialized # data as a blob",
"data) rmTry = cc.removeTrace(data) print(rmTry) def main(): testSerializer() testCloudPassageJSONVersion() testCloudPassagePickledVersion()",
"in after deserialization. # Sample usage might be in collaborative",
"deserialization. # Sample usage might be in collaborative computing ie",
"computing ie publish results from an expensive # computation on",
"data. def testSerializer(): import Serializer bs = Serializer.BinarySerializer() js =",
"rmTry = cc.removeTrace(data) print(rmTry) def main(): testSerializer() testCloudPassageJSONVersion() testCloudPassagePickledVersion() if",
"later re-using it in after deserialization. # Sample usage might",
"# data as a blob and then later re-using it",
"import CloudPassageHandler cc = CloudPassageHandler() data = dict((i, i*10) for",
"def testSerializer(): import Serializer bs = Serializer.BinarySerializer() js = Serializer.JSONSerializer()",
"= dict((i, i) for i in range(10)) bserial = bs.serialize(data)",
"for i in range(10)) bserial = bs.serialize(data) jserial = js.serialize(data)",
"jserial = js.serialize(data) bdserial = bs.deserialize(bserial) jdserial = js.deserialize(jserial) print('bdserial',",
"ioS = bs.ioStream(bserial) ioR = ioS.read() print('ioS data from the",
"bs.serialize(data) jserial = js.serialize(data) bdserial = bs.deserialize(bserial) jdserial = js.deserialize(jserial)",
"dict((i, i) for i in range(10)) bserial = bs.serialize(data) jserial",
"bs.deserialize(bserial) jdserial = js.deserialize(jserial) print('bdserial', bdserial) ioS = bs.ioStream(bserial) ioR",
"= ioS.read() print('ioS data from the stream', ioR) def testCloudPassagePickledVersion():",
"be in collaborative computing ie publish results from an expensive",
"pickled/serialized # data as a blob and then later re-using",
"data = dict((str(i), i*10) for i in range(9)) title =",
"pulledObj, data) assert(pulledObj == data) rmTry = cc.removeTrace(data) print(rmTry) def",
"of items 0-8999, keys i*10' res = cc.push(data, title=title, asPickle=True)",
"dict((str(i), i*10) for i in range(9)) title = 'Dict of",
"asPickle=False) pulledObj = cc.pull(metaData='json') print('PulledObj', pulledObj, data) assert(pulledObj == data)",
"so that other machines can load it as live data.",
"items 0-8999, keys i*10' res = cc.push(data, title=title, asPickle=False) pulledObj",
"pulledObj = cc.pull(metaData='json') print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry",
"= CloudPassageHandler() data = dict((i, i*10) for i in range(9))",
"pulledObj, data) assert(pulledObj == data) rmTry = cc.removeTrace(data, asPickle=True) print(rmTry)",
"one machine so that other machines can load it as",
"def testCloudPassagePickledVersion(): from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data",
"Serializer bs = Serializer.BinarySerializer() js = Serializer.JSONSerializer() data = dict((i,",
"usage might be in collaborative computing ie publish results from",
"data as a blob and then later re-using it in",
"that other machines can load it as live data. def",
"== data) rmTry = cc.removeTrace(data, asPickle=True) print(rmTry) def testCloudPassageJSONVersion(): from",
"might be in collaborative computing ie publish results from an",
"re-using it in after deserialization. # Sample usage might be",
"collaborative computing ie publish results from an expensive # computation",
"= dict((i, i*10) for i in range(9)) title = 'Dict",
"= cc.push(data, title=title, asPickle=True) pulledObj = cc.pull(metaData='pickle') print('PulledObj', pulledObj, data)",
"# Author: <NAME> <<EMAIL>> # This example steps you through",
"from the stream', ioR) def testCloudPassagePickledVersion(): from entrails.cloudPassage import CloudPassageHandler",
"= js.serialize(data) bdserial = bs.deserialize(bserial) jdserial = js.deserialize(jserial) print('bdserial', bdserial)",
"i*10' res = cc.push(data, title=title, asPickle=True) pulledObj = cc.pull(metaData='pickle') print('PulledObj',",
"bdserial = bs.deserialize(bserial) jdserial = js.deserialize(jserial) print('bdserial', bdserial) ioS =",
"live data. def testSerializer(): import Serializer bs = Serializer.BinarySerializer() js",
"from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data = dict((str(i),",
"import Serializer bs = Serializer.BinarySerializer() js = Serializer.JSONSerializer() data =",
"in range(9)) title = 'Dict of items 0-8999, keys i*10'",
"and then later re-using it in after deserialization. # Sample",
"i*10' res = cc.push(data, title=title, asPickle=False) pulledObj = cc.pull(metaData='json') print('PulledObj',",
"data) assert(pulledObj == data) rmTry = cc.removeTrace(data) print(rmTry) def main():",
"data) assert(pulledObj == data) rmTry = cc.removeTrace(data, asPickle=True) print(rmTry) def",
"ioS.read() print('ioS data from the stream', ioR) def testCloudPassagePickledVersion(): from",
"items 0-8999, keys i*10' res = cc.push(data, title=title, asPickle=True) pulledObj",
"the stream', ioR) def testCloudPassagePickledVersion(): from entrails.cloudPassage import CloudPassageHandler cc",
"print(rmTry) def testCloudPassageJSONVersion(): from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler()",
"cc.push(data, title=title, asPickle=True) pulledObj = cc.pull(metaData='pickle') print('PulledObj', pulledObj, data) assert(pulledObj",
"asPickle=True) pulledObj = cc.pull(metaData='pickle') print('PulledObj', pulledObj, data) assert(pulledObj == data)",
"for i in range(9)) title = 'Dict of items 0-8999,",
"cc = CloudPassageHandler() data = dict((str(i), i*10) for i in",
"through using resty & restAssured to save pickled/serialized # data",
"print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry = cc.removeTrace(data) print(rmTry)",
"js.serialize(data) bdserial = bs.deserialize(bserial) jdserial = js.deserialize(jserial) print('bdserial', bdserial) ioS",
"print('ioS data from the stream', ioR) def testCloudPassagePickledVersion(): from entrails.cloudPassage",
"from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data = dict((i,",
"using resty & restAssured to save pickled/serialized # data as",
"can load it as live data. def testSerializer(): import Serializer",
"Serializer.BinarySerializer() js = Serializer.JSONSerializer() data = dict((i, i) for i",
"bs = Serializer.BinarySerializer() js = Serializer.JSONSerializer() data = dict((i, i)",
"= 'Dict of items 0-8999, keys i*10' res = cc.push(data,",
"cc.push(data, title=title, asPickle=False) pulledObj = cc.pull(metaData='json') print('PulledObj', pulledObj, data) assert(pulledObj",
"= cc.pull(metaData='json') print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry =",
"title=title, asPickle=False) pulledObj = cc.pull(metaData='json') print('PulledObj', pulledObj, data) assert(pulledObj ==",
"python3 # Author: <NAME> <<EMAIL>> # This example steps you",
"an expensive # computation on one machine so that other",
"as live data. def testSerializer(): import Serializer bs = Serializer.BinarySerializer()",
"# computation on one machine so that other machines can",
"#!/usr/bin/env python3 # Author: <NAME> <<EMAIL>> # This example steps",
"= Serializer.JSONSerializer() data = dict((i, i) for i in range(10))",
"cc.pull(metaData='json') print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry = cc.removeTrace(data)",
"= dict((str(i), i*10) for i in range(9)) title = 'Dict",
"cc = CloudPassageHandler() data = dict((i, i*10) for i in",
"to save pickled/serialized # data as a blob and then",
"CloudPassageHandler() data = dict((i, i*10) for i in range(9)) title",
"jdserial = js.deserialize(jserial) print('bdserial', bdserial) ioS = bs.ioStream(bserial) ioR =",
"assert(pulledObj == data) rmTry = cc.removeTrace(data, asPickle=True) print(rmTry) def testCloudPassageJSONVersion():",
"from an expensive # computation on one machine so that",
"steps you through using resty & restAssured to save pickled/serialized",
"testSerializer(): import Serializer bs = Serializer.BinarySerializer() js = Serializer.JSONSerializer() data",
"ie publish results from an expensive # computation on one",
"= CloudPassageHandler() data = dict((str(i), i*10) for i in range(9))",
"ioR) def testCloudPassagePickledVersion(): from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler()",
"machines can load it as live data. def testSerializer(): import",
"publish results from an expensive # computation on one machine",
"res = cc.push(data, title=title, asPickle=True) pulledObj = cc.pull(metaData='pickle') print('PulledObj', pulledObj,",
"it as live data. def testSerializer(): import Serializer bs =",
"cc.removeTrace(data, asPickle=True) print(rmTry) def testCloudPassageJSONVersion(): from entrails.cloudPassage import CloudPassageHandler cc",
"Serializer.JSONSerializer() data = dict((i, i) for i in range(10)) bserial",
"stream', ioR) def testCloudPassagePickledVersion(): from entrails.cloudPassage import CloudPassageHandler cc =",
"import CloudPassageHandler cc = CloudPassageHandler() data = dict((str(i), i*10) for",
"load it as live data. def testSerializer(): import Serializer bs",
"= cc.pull(metaData='pickle') print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry =",
"blob and then later re-using it in after deserialization. #",
"ioR = ioS.read() print('ioS data from the stream', ioR) def",
"data from the stream', ioR) def testCloudPassagePickledVersion(): from entrails.cloudPassage import",
"js = Serializer.JSONSerializer() data = dict((i, i) for i in",
"testCloudPassageJSONVersion(): from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data =",
"= js.deserialize(jserial) print('bdserial', bdserial) ioS = bs.ioStream(bserial) ioR = ioS.read()",
"testCloudPassagePickledVersion(): from entrails.cloudPassage import CloudPassageHandler cc = CloudPassageHandler() data =",
"keys i*10' res = cc.push(data, title=title, asPickle=True) pulledObj = cc.pull(metaData='pickle')",
"as a blob and then later re-using it in after",
"pulledObj = cc.pull(metaData='pickle') print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry",
"assert(pulledObj == data) rmTry = cc.removeTrace(data) print(rmTry) def main(): testSerializer()",
"= cc.removeTrace(data) print(rmTry) def main(): testSerializer() testCloudPassageJSONVersion() testCloudPassagePickledVersion() if __name__",
"title=title, asPickle=True) pulledObj = cc.pull(metaData='pickle') print('PulledObj', pulledObj, data) assert(pulledObj ==",
"computation on one machine so that other machines can load",
"in range(10)) bserial = bs.serialize(data) jserial = js.serialize(data) bdserial =",
"'Dict of items 0-8999, keys i*10' res = cc.push(data, title=title,",
"save pickled/serialized # data as a blob and then later",
"i*10) for i in range(9)) title = 'Dict of items",
"Sample usage might be in collaborative computing ie publish results",
"results from an expensive # computation on one machine so",
"cc.pull(metaData='pickle') print('PulledObj', pulledObj, data) assert(pulledObj == data) rmTry = cc.removeTrace(data,",
"= bs.ioStream(bserial) ioR = ioS.read() print('ioS data from the stream',",
"range(9)) title = 'Dict of items 0-8999, keys i*10' res"
] |
[
"is free software: you can redistribute it and/or modify #",
"idx.append(i) return idx def cbind(self, **columns): keys = sorted([x for",
"if len(vals) > 10: vals = list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals)",
"This file is part of dataframe. # # dataframe is",
"piping # # Copyright (C) 2016 <NAME> # # This",
"vals = col.values if len(vals) > 10: vals = list(chain(vals[:3],",
"@property def nrow(self): return self.__nrow @property def ncol(self): return len(self.colnames)",
"len(vals) > 10: vals = list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals) ta",
"WITHOUT ANY WARRANTY; without even the implied warranty of #",
"col-name!\") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for col in self.__data_columns: if",
"the GNU General Public License # along with dataframe. If",
"under the terms of the GNU General Public License as",
"of dataframe. # # dataframe is free software: you can",
"warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.",
"a DataFrameRow \"\"\" return DataFrameRow(idx, [x[idx] for x in self],",
"or # (at your option) any later version. # #",
"self.__nrow = -1 self.cbind(**kwargs) def __getitem__(self, item): if isinstance(item, int):",
"of the GNU General Public License # along with dataframe.",
"General Public License # along with dataframe. If not, see",
"self.colnames: ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for col",
"self], self.colnames) def which_colnames(self, *args): idx = [] for i",
"# GNU General Public License for more details. # #",
"dataframe\" ta = [] for col in self.__data_columns: vals =",
"= [] self.__nrow = -1 self.cbind(**kwargs) def __getitem__(self, item): if",
"= [] for i in range(len(self.__data_columns)): if self.colnames[i] in args:",
"idx: the index of the row in the DataFrame. :return:",
":param idx: the index of the row in the DataFrame.",
"import tabulate from ._dataframe_column import DataFrameColumn from ._dataframe_row import DataFrameRow",
"ncol(self): return len(self.colnames) @property def colnames(self): return [x.colname for x",
"Foundation, either version 3 of the License, or # (at",
"self.cbind(**kwargs) def __getitem__(self, item): if isinstance(item, int): return self.__data_columns[item] raise",
"dataframe is free software: you can redistribute it and/or modify",
"Copyright (C) 2016 <NAME> # # This file is part",
"General Public License for more details. # # You should",
"A PARTICULAR PURPOSE. See the # GNU General Public License",
"import chain import tabulate from ._dataframe_column import DataFrameColumn from ._dataframe_row",
"the Free Software Foundation, either version 3 of the License,",
"# # Copyright (C) 2016 <NAME> # # This file",
"duplicate col-name!\") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for col in self.__data_columns:",
"[] for col in self.__data_columns: vals = col.values if len(vals)",
"even the implied warranty of # MERCHANTABILITY or FITNESS FOR",
"free software: you can redistribute it and/or modify # it",
"idxs] def row(self, idx): \"\"\" Returns DataFrameRow of the DataFrame",
"or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU",
"the implied warranty of # MERCHANTABILITY or FITNESS FOR A",
"[] for i in range(len(self.__data_columns)): if self.colnames[i] in args: idx.append(i)",
"self.__data_columns[-1].size() for col in self.__data_columns: if col.size() != self.__nrow: raise",
"[x.colname for x in self.__data_columns] def rows(self, idxs): return [self.row(i)",
"# # # @author = '<NAME>' # @email = '<EMAIL>'",
":return: returns a DataFrameRow \"\"\" return DataFrameRow(idx, [x[idx] for x",
"of the GNU General Public License as published by #",
"in self.__data_columns] def rows(self, idxs): return [self.row(i) for i in",
"can redistribute it and/or modify # it under the terms",
"DataFrameRow class DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns = [] self.__nrow",
"by # the Free Software Foundation, either version 3 of",
"ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return stri + \"\\n\\n\" + ta.__str__()",
"col.size() != self.__nrow: raise ValueError(\"Columns do not have equal lengths!\")",
"later version. # # dataframe is distributed in the hope",
"len(self.colnames) @property def colnames(self): return [x.colname for x in self.__data_columns]",
"for col in self.__data_columns: if col.size() != self.__nrow: raise ValueError(\"Columns",
"implementation using method piping # # Copyright (C) 2016 <NAME>",
"self.__nrow = self.__data_columns[-1].size() for col in self.__data_columns: if col.size() !=",
"dataframe is distributed in the hope that it will be",
"# # dataframe is free software: you can redistribute it",
"License for more details. # # You should have received",
"def __cbind(self, column): if column.colname in self.colnames: ValueError(\"Appending duplicate col-name!\")",
"itertools import chain import tabulate from ._dataframe_column import DataFrameColumn from",
"nrow(self): return self.__nrow @property def ncol(self): return len(self.colnames) @property def",
"keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column): if column.colname in self.colnames:",
"tabulate.tabulate(zip(*ta), headers=self.colnames) return stri + \"\\n\\n\" + ta.__str__() @property def",
"the License, or # (at your option) any later version.",
"from ._dataframe_column import DataFrameColumn from ._dataframe_row import DataFrameRow class DataFrameColumnSet:",
"in idxs] def row(self, idx): \"\"\" Returns DataFrameRow of the",
"is part of dataframe. # # dataframe is free software:",
"self.__data_columns[item] raise ValueError(\"Item should be integer!\") def __iter__(self): for col",
"self.__nrow @property def ncol(self): return len(self.colnames) @property def colnames(self): return",
"PARTICULAR PURPOSE. See the # GNU General Public License for",
"in self.__data_columns: vals = col.values if len(vals) > 10: vals",
"raise ValueError(\"Item should be integer!\") def __iter__(self): for col in",
"modify # it under the terms of the GNU General",
"terms of the GNU General Public License as published by",
"should be integer!\") def __iter__(self): for col in self.__data_columns: yield",
"for x in columns.keys()]) for k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k)))",
"details. # # You should have received a copy of",
"= \"\\nA dataframe\" ta = [] for col in self.__data_columns:",
"10: vals = list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta),",
"if column.colname in self.colnames: ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column) self.__nrow =",
"return stri + \"\\n\\n\" + ta.__str__() @property def nrow(self): return",
"see <http://www.gnu.org/licenses/>. # # # @author = '<NAME>' # @email",
"published by # the Free Software Foundation, either version 3",
"@property def colnames(self): return [x.colname for x in self.__data_columns] def",
"__iter__(self): for col in self.__data_columns: yield col def __str__(self): stri",
"[x[idx] for x in self], self.colnames) def which_colnames(self, *args): idx",
"ValueError(\"Item should be integer!\") def __iter__(self): for col in self.__data_columns:",
"list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return stri",
"for x in self.__data_columns] def rows(self, idxs): return [self.row(i) for",
"__init__(self, **kwargs): self.__data_columns = [] self.__nrow = -1 self.cbind(**kwargs) def",
"(at your option) any later version. # # dataframe is",
"rows(self, idxs): return [self.row(i) for i in idxs] def row(self,",
"i in idxs] def row(self, idx): \"\"\" Returns DataFrameRow of",
"given its index. :param idx: the index of the row",
"col.values if len(vals) > 10: vals = list(chain(vals[:3], \"...\", vals[-3:]))",
"in self.__data_columns: if col.size() != self.__nrow: raise ValueError(\"Columns do not",
"received a copy of the GNU General Public License #",
"import DataFrameRow class DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns = []",
"def __iter__(self): for col in self.__data_columns: yield col def __str__(self):",
"License as published by # the Free Software Foundation, either",
"See the # GNU General Public License for more details.",
"stri + \"\\n\\n\" + ta.__str__() @property def nrow(self): return self.__nrow",
"= '<EMAIL>' from itertools import chain import tabulate from ._dataframe_column",
"args: idx.append(i) return idx def cbind(self, **columns): keys = sorted([x",
"that it will be useful, # but WITHOUT ANY WARRANTY;",
"its index. :param idx: the index of the row in",
"part of dataframe. # # dataframe is free software: you",
"either version 3 of the License, or # (at your",
"colnames(self): return [x.colname for x in self.__data_columns] def rows(self, idxs):",
"the index of the row in the DataFrame. :return: returns",
"return [x.colname for x in self.__data_columns] def rows(self, idxs): return",
"more details. # # You should have received a copy",
"return len(self.colnames) @property def colnames(self): return [x.colname for x in",
"= self.__data_columns[-1].size() for col in self.__data_columns: if col.size() != self.__nrow:",
"be useful, # but WITHOUT ANY WARRANTY; without even the",
"headers=self.colnames) return stri + \"\\n\\n\" + ta.__str__() @property def nrow(self):",
"return [self.row(i) for i in idxs] def row(self, idx): \"\"\"",
"ta.__str__() @property def nrow(self): return self.__nrow @property def ncol(self): return",
"# (at your option) any later version. # # dataframe",
"GNU General Public License # along with dataframe. If not,",
"# dataframe is distributed in the hope that it will",
"col in self.__data_columns: if col.size() != self.__nrow: raise ValueError(\"Columns do",
"= list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return",
"._dataframe_row import DataFrameRow class DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns =",
"= sorted([x for x in columns.keys()]) for k in keys:",
"# but WITHOUT ANY WARRANTY; without even the implied warranty",
"Free Software Foundation, either version 3 of the License, or",
"not, see <http://www.gnu.org/licenses/>. # # # @author = '<NAME>' #",
"<NAME> # # This file is part of dataframe. #",
"If not, see <http://www.gnu.org/licenses/>. # # # @author = '<NAME>'",
"index. :param idx: the index of the row in the",
"and/or modify # it under the terms of the GNU",
"implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR",
"'<EMAIL>' from itertools import chain import tabulate from ._dataframe_column import",
"returns a DataFrameRow \"\"\" return DataFrameRow(idx, [x[idx] for x in",
"it and/or modify # it under the terms of the",
"for i in range(len(self.__data_columns)): if self.colnames[i] in args: idx.append(i) return",
"in columns.keys()]) for k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self,",
"it will be useful, # but WITHOUT ANY WARRANTY; without",
"idx def cbind(self, **columns): keys = sorted([x for x in",
"License # along with dataframe. If not, see <http://www.gnu.org/licenses/>. #",
"column): if column.colname in self.colnames: ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column) self.__nrow",
"# it under the terms of the GNU General Public",
"for i in idxs] def row(self, idx): \"\"\" Returns DataFrameRow",
"ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for col in",
"of the License, or # (at your option) any later",
"hope that it will be useful, # but WITHOUT ANY",
"it under the terms of the GNU General Public License",
"'<NAME>' # @email = '<EMAIL>' from itertools import chain import",
"the GNU General Public License as published by # the",
"Returns DataFrameRow of the DataFrame given its index. :param idx:",
"__getitem__(self, item): if isinstance(item, int): return self.__data_columns[item] raise ValueError(\"Item should",
"int): return self.__data_columns[item] raise ValueError(\"Item should be integer!\") def __iter__(self):",
"in self], self.colnames) def which_colnames(self, *args): idx = [] for",
"FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General",
"DataFrame. :return: returns a DataFrameRow \"\"\" return DataFrameRow(idx, [x[idx] for",
"idx): \"\"\" Returns DataFrameRow of the DataFrame given its index.",
"General Public License as published by # the Free Software",
"with dataframe. If not, see <http://www.gnu.org/licenses/>. # # # @author",
"will be useful, # but WITHOUT ANY WARRANTY; without even",
"__str__(self): stri = \"\\nA dataframe\" ta = [] for col",
"# @author = '<NAME>' # @email = '<EMAIL>' from itertools",
"be integer!\") def __iter__(self): for col in self.__data_columns: yield col",
"in self.colnames: ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for",
"index of the row in the DataFrame. :return: returns a",
"method piping # # Copyright (C) 2016 <NAME> # #",
"PURPOSE. See the # GNU General Public License for more",
"FOR A PARTICULAR PURPOSE. See the # GNU General Public",
"DataFrameRow of the DataFrame given its index. :param idx: the",
"the # GNU General Public License for more details. #",
"of the DataFrame given its index. :param idx: the index",
"of the row in the DataFrame. :return: returns a DataFrameRow",
"for more details. # # You should have received a",
"__cbind(self, column): if column.colname in self.colnames: ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column)",
"redistribute it and/or modify # it under the terms of",
"in args: idx.append(i) return idx def cbind(self, **columns): keys =",
"self.__data_columns = [] self.__nrow = -1 self.cbind(**kwargs) def __getitem__(self, item):",
"\"\"\" return DataFrameRow(idx, [x[idx] for x in self], self.colnames) def",
"range(len(self.__data_columns)): if self.colnames[i] in args: idx.append(i) return idx def cbind(self,",
"def __getitem__(self, item): if isinstance(item, int): return self.__data_columns[item] raise ValueError(\"Item",
"row(self, idx): \"\"\" Returns DataFrameRow of the DataFrame given its",
"for x in self], self.colnames) def which_colnames(self, *args): idx =",
"@property def ncol(self): return len(self.colnames) @property def colnames(self): return [x.colname",
"**columns): keys = sorted([x for x in columns.keys()]) for k",
"for k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column): if",
"def nrow(self): return self.__nrow @property def ncol(self): return len(self.colnames) @property",
"ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY",
"x in columns.keys()]) for k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def",
"in the DataFrame. :return: returns a DataFrameRow \"\"\" return DataFrameRow(idx,",
"= col.values if len(vals) > 10: vals = list(chain(vals[:3], \"...\",",
"dataframe. # # dataframe is free software: you can redistribute",
"def rows(self, idxs): return [self.row(i) for i in idxs] def",
"return idx def cbind(self, **columns): keys = sorted([x for x",
"the hope that it will be useful, # but WITHOUT",
"self.__data_columns] def rows(self, idxs): return [self.row(i) for i in idxs]",
"if isinstance(item, int): return self.__data_columns[item] raise ValueError(\"Item should be integer!\")",
"def cbind(self, **columns): keys = sorted([x for x in columns.keys()])",
"def colnames(self): return [x.colname for x in self.__data_columns] def rows(self,",
"software: you can redistribute it and/or modify # it under",
"# dataframe is free software: you can redistribute it and/or",
"x in self.__data_columns] def rows(self, idxs): return [self.row(i) for i",
"self.__data_columns: yield col def __str__(self): stri = \"\\nA dataframe\" ta",
"[] self.__nrow = -1 self.cbind(**kwargs) def __getitem__(self, item): if isinstance(item,",
"cbind(self, **columns): keys = sorted([x for x in columns.keys()]) for",
"self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size() for col in self.__data_columns: if col.size()",
"your option) any later version. # # dataframe is distributed",
"copy of the GNU General Public License # along with",
"\"\\n\\n\" + ta.__str__() @property def nrow(self): return self.__nrow @property def",
"def ncol(self): return len(self.colnames) @property def colnames(self): return [x.colname for",
"# Copyright (C) 2016 <NAME> # # This file is",
"self.colnames[i] in args: idx.append(i) return idx def cbind(self, **columns): keys",
"self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column): if column.colname in self.colnames: ValueError(\"Appending",
"+ \"\\n\\n\" + ta.__str__() @property def nrow(self): return self.__nrow @property",
"# This file is part of dataframe. # # dataframe",
"distributed in the hope that it will be useful, #",
"2016 <NAME> # # This file is part of dataframe.",
"License, or # (at your option) any later version. #",
"def __str__(self): stri = \"\\nA dataframe\" ta = [] for",
"(C) 2016 <NAME> # # This file is part of",
"\"\"\" Returns DataFrameRow of the DataFrame given its index. :param",
"for col in self.__data_columns: vals = col.values if len(vals) >",
"# along with dataframe. If not, see <http://www.gnu.org/licenses/>. # #",
"ta = [] for col in self.__data_columns: vals = col.values",
"= tabulate.tabulate(zip(*ta), headers=self.colnames) return stri + \"\\n\\n\" + ta.__str__() @property",
"columns.keys()]) for k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column):",
"You should have received a copy of the GNU General",
"in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column): if column.colname in",
"in self.__data_columns: yield col def __str__(self): stri = \"\\nA dataframe\"",
"class DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns = [] self.__nrow =",
"@author = '<NAME>' # @email = '<EMAIL>' from itertools import",
"isinstance(item, int): return self.__data_columns[item] raise ValueError(\"Item should be integer!\") def",
"= -1 self.cbind(**kwargs) def __getitem__(self, item): if isinstance(item, int): return",
"col in self.__data_columns: yield col def __str__(self): stri = \"\\nA",
"= '<NAME>' # @email = '<EMAIL>' from itertools import chain",
"useful, # but WITHOUT ANY WARRANTY; without even the implied",
"the DataFrame given its index. :param idx: the index of",
"= [] for col in self.__data_columns: vals = col.values if",
"file is part of dataframe. # # dataframe is free",
"you can redistribute it and/or modify # it under the",
"x in self], self.colnames) def which_colnames(self, *args): idx = []",
"# You should have received a copy of the GNU",
"col def __str__(self): stri = \"\\nA dataframe\" ta = []",
"any later version. # # dataframe is distributed in the",
"yield col def __str__(self): stri = \"\\nA dataframe\" ta =",
"DataFrameColumn from ._dataframe_row import DataFrameRow class DataFrameColumnSet: def __init__(self, **kwargs):",
"\"...\", vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return stri +",
"should have received a copy of the GNU General Public",
"keys = sorted([x for x in columns.keys()]) for k in",
"from ._dataframe_row import DataFrameRow class DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns",
"<http://www.gnu.org/licenses/>. # # # @author = '<NAME>' # @email =",
"dataframe. If not, see <http://www.gnu.org/licenses/>. # # # @author =",
"item): if isinstance(item, int): return self.__data_columns[item] raise ValueError(\"Item should be",
"integer!\") def __iter__(self): for col in self.__data_columns: yield col def",
"if self.colnames[i] in args: idx.append(i) return idx def cbind(self, **columns):",
"# # @author = '<NAME>' # @email = '<EMAIL>' from",
"using method piping # # Copyright (C) 2016 <NAME> #",
"def row(self, idx): \"\"\" Returns DataFrameRow of the DataFrame given",
"tabulate from ._dataframe_column import DataFrameColumn from ._dataframe_row import DataFrameRow class",
"Public License # along with dataframe. If not, see <http://www.gnu.org/licenses/>.",
"the row in the DataFrame. :return: returns a DataFrameRow \"\"\"",
"GNU General Public License as published by # the Free",
"which_colnames(self, *args): idx = [] for i in range(len(self.__data_columns)): if",
"._dataframe_column import DataFrameColumn from ._dataframe_row import DataFrameRow class DataFrameColumnSet: def",
"# the Free Software Foundation, either version 3 of the",
"i in range(len(self.__data_columns)): if self.colnames[i] in args: idx.append(i) return idx",
"version. # # dataframe is distributed in the hope that",
"of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See",
"sorted([x for x in columns.keys()]) for k in keys: self.__cbind(DataFrameColumn(str(k),",
"def __init__(self, **kwargs): self.__data_columns = [] self.__nrow = -1 self.cbind(**kwargs)",
"col in self.__data_columns: vals = col.values if len(vals) > 10:",
"data-frame implementation using method piping # # Copyright (C) 2016",
"a data-frame implementation using method piping # # Copyright (C)",
"but WITHOUT ANY WARRANTY; without even the implied warranty of",
"if col.size() != self.__nrow: raise ValueError(\"Columns do not have equal",
"Public License for more details. # # You should have",
"vals = list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames)",
"the terms of the GNU General Public License as published",
"return self.__nrow @property def ncol(self): return len(self.colnames) @property def colnames(self):",
"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #",
"**kwargs): self.__data_columns = [] self.__nrow = -1 self.cbind(**kwargs) def __getitem__(self,",
"Software Foundation, either version 3 of the License, or #",
"in range(len(self.__data_columns)): if self.colnames[i] in args: idx.append(i) return idx def",
"dataframe: a data-frame implementation using method piping # # Copyright",
"k in keys: self.__cbind(DataFrameColumn(str(k), columns.get(k))) def __cbind(self, column): if column.colname",
"for col in self.__data_columns: yield col def __str__(self): stri =",
"option) any later version. # # dataframe is distributed in",
"as published by # the Free Software Foundation, either version",
"self.colnames) def which_colnames(self, *args): idx = [] for i in",
"version 3 of the License, or # (at your option)",
"-1 self.cbind(**kwargs) def __getitem__(self, item): if isinstance(item, int): return self.__data_columns[item]",
"row in the DataFrame. :return: returns a DataFrameRow \"\"\" return",
"> 10: vals = list(chain(vals[:3], \"...\", vals[-3:])) ta.append(vals) ta =",
"def which_colnames(self, *args): idx = [] for i in range(len(self.__data_columns)):",
"a copy of the GNU General Public License # along",
"DataFrameColumnSet: def __init__(self, **kwargs): self.__data_columns = [] self.__nrow = -1",
"ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return stri + \"\\n\\n\" +",
"return self.__data_columns[item] raise ValueError(\"Item should be integer!\") def __iter__(self): for",
"# # This file is part of dataframe. # #",
"Public License as published by # the Free Software Foundation,",
"*args): idx = [] for i in range(len(self.__data_columns)): if self.colnames[i]",
"DataFrameRow \"\"\" return DataFrameRow(idx, [x[idx] for x in self], self.colnames)",
"have received a copy of the GNU General Public License",
"self.__data_columns: if col.size() != self.__nrow: raise ValueError(\"Columns do not have",
"columns.get(k))) def __cbind(self, column): if column.colname in self.colnames: ValueError(\"Appending duplicate",
"idxs): return [self.row(i) for i in idxs] def row(self, idx):",
"in the hope that it will be useful, # but",
"chain import tabulate from ._dataframe_column import DataFrameColumn from ._dataframe_row import",
"from itertools import chain import tabulate from ._dataframe_column import DataFrameColumn",
"# # dataframe is distributed in the hope that it",
"[self.row(i) for i in idxs] def row(self, idx): \"\"\" Returns",
"along with dataframe. If not, see <http://www.gnu.org/licenses/>. # # #",
"idx = [] for i in range(len(self.__data_columns)): if self.colnames[i] in",
"DataFrame given its index. :param idx: the index of the",
"return DataFrameRow(idx, [x[idx] for x in self], self.colnames) def which_colnames(self,",
"column.colname in self.colnames: ValueError(\"Appending duplicate col-name!\") self.__data_columns.append(column) self.__nrow = self.__data_columns[-1].size()",
"the DataFrame. :return: returns a DataFrameRow \"\"\" return DataFrameRow(idx, [x[idx]",
"+ ta.__str__() @property def nrow(self): return self.__nrow @property def ncol(self):",
"vals[-3:])) ta.append(vals) ta = tabulate.tabulate(zip(*ta), headers=self.colnames) return stri + \"\\n\\n\"",
"WARRANTY; without even the implied warranty of # MERCHANTABILITY or",
"self.__data_columns: vals = col.values if len(vals) > 10: vals =",
"GNU General Public License for more details. # # You",
"# @email = '<EMAIL>' from itertools import chain import tabulate",
"stri = \"\\nA dataframe\" ta = [] for col in",
"is distributed in the hope that it will be useful,",
"3 of the License, or # (at your option) any",
"import DataFrameColumn from ._dataframe_row import DataFrameRow class DataFrameColumnSet: def __init__(self,",
"\"\\nA dataframe\" ta = [] for col in self.__data_columns: vals",
"# # You should have received a copy of the",
"@email = '<EMAIL>' from itertools import chain import tabulate from",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the",
"DataFrameRow(idx, [x[idx] for x in self], self.colnames) def which_colnames(self, *args):",
"without even the implied warranty of # MERCHANTABILITY or FITNESS",
"# dataframe: a data-frame implementation using method piping # #"
],
[
"for jar in ctx.attr.jar_layers: available += java_files(jar) # This is",
"_war_app_layer_impl(ctx): \"\"\"Appends the app layer with all remaining runfiles.\"\"\" available",
"2.0 (the \"License\"); # you may not use this file",
"= base, library = library_name, jar_layers = layers, visibility =",
"their own layers # factored into our base. \"jar_layers\": attr.label_list(),",
"# factored into our base. \"jar_layers\": attr.label_list(), # The base",
"deps (even []) if there is no srcs # kwarg.",
"= \"container\", ) def java_files(f): files = [] if java_common.provider",
"name = name, base = base, library = library_name, jar_layers",
"built with java_library into # a binary, then it will",
"executable = True, outputs = _container.image.outputs, implementation = _war_dep_layer_impl, )",
"Override the defaults. \"directory\": attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\":",
"# symlink forest in the app layers. \"agnostic_dep_layout\": attr.bool(default =",
"= \"DIGESTS\", ) load( \":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\", ) def",
"for x in available + unavailable ]) # Classpaths can",
"appending. \"dep\": attr.label(mandatory = True), # Whether to lay out",
"is turning a JAR built with java_library into # a",
"PATHS FLATTENED # \"data_path\": attr.string(default = \".\"), }.items()), executable =",
"instead. classpath_file = ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file, classpath) binary_path =",
") if \"servlet_api\" not in excludes: native.maven_jar( name = \"javax_servlet_api\",",
"Handle data files. # If we start putting libs in",
"WE WANT PATHS FLATTENED # \"data_path\": attr.string(default = \".\"), }.items()),",
"that is agnostic # of the binary in which it",
"+= java_files(jar) # We compute the set of unavailable stuff",
"manner that is agnostic # of the binary in which",
"as a file. \"_classpath_as_file\": attr.bool(default = False), # Override the",
"Classpaths can grow long and there is a limit on",
"our base. \"jar_layers\": attr.label_list(), # The rest of the dependencies.",
"= name + \".binary\" native.java_binary( name = binary_name, main_class =",
"[ctx.attr.main_class] + ctx.attr.args file_map = { layer_file_path(ctx, f): f for",
"License for the specific language governing permissions and # limitations",
"files = [] if java_common.provider in f: java_provider = f[java_common.provider]",
"single dependency's runfiles.\"\"\" return dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer =",
"+= java_files(ctx.attr.binary) unavailable = [x for x in unavailable if",
"[], layers = [], jvm_flags = [], **kwargs): \"\"\"Builds a",
"https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), }.items()), executable = True, outputs",
"of dependencies that have their own layers # factored into",
"# If we start putting libs in servlet-agnostic paths, #",
"+ layers, **kwargs) base = base or DEFAULT_JETTY_BASE for index,",
"flattening and using basename # we should use a file_map",
"visibility = visibility, args = kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx): \"\"\"Appends",
"able to ctrl-C it and have the container actually terminate.",
"dep in enumerate(layers): this_name = \"%s.%d\" % (name, index) jar_dep_layer(name",
"WAR rules. transitive_deps = depset() transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor):",
"dep = dep) base = this_name visibility = kwargs.get(\"visibility\", None)",
"= native.existing_rules().keys() if \"java_image_base\" not in excludes: container_pull( name =",
"dep in enumerate(layers): this_name = \"%s.%d\" % (name, index) _war_dep_layer(name",
"set of unavailable stuff by walking deps # in the",
"digests. load( \":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\", ) load( \":jetty.bzl\", _JETTY_DIGESTS",
"srcs # kwarg. deps = (deps + layers) or None,",
"select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", })",
"name = name, base = base, binary = binary_name, main_class",
"not in excludes: container_pull( name = \"jetty_image_base\", registry = \"gcr.io\",",
"if there is no srcs # kwarg. deps = (deps",
"= rule( attrs = dict(_container.image.attrs.items() + { # The library",
"excludes: container_pull( name = \"java_debug_image_base\", registry = \"gcr.io\", repository =",
"a limit on the length of a # command line,",
"x) for x in available + unavailable ]) # Classpaths",
"attr.string_list(), # The base image on which to overlay the",
"# \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default = False), }.items()),",
"binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file) entrypoint =",
"OF ANY KIND, either express or implied. # See the",
"= \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), }.items()), executable",
"See the License for the specific language governing permissions and",
"to in writing, software # distributed under the License is",
"# limitations under the License. \"\"\"A rule for creating a",
"limit on the length of a # command line, so",
"_container.image.outputs, implementation = _war_app_layer_impl, ) def war_image(name, base = None,",
"images, but requires a # symlink forest in the app",
"[] if java_common.provider in f: java_provider = f[java_common.provider] files +=",
"= \"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"], ) if",
") if \"jetty_debug_image_base\" not in excludes: container_pull( name = \"jetty_debug_image_base\",",
"or agreed to in writing, software # distributed under the",
"= _JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\" not in excludes: container_pull( name",
"_JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\" not in excludes: native.maven_jar( name =",
"paths, # then consider adding symlinks here. files = [d",
"\"agnostic_dep_layout\": attr.bool(default = True), # Whether the classpath should be",
"synthesizing an image. \"library\": attr.label(mandatory = True), # The full",
"start putting libs in servlet-agnostic paths, # then consider adding",
"entrypoint = [ \"/usr/bin/java\", \"-cp\", # Support optionally passing the",
"name = binary_name, main_class = main_class, # If the rule",
"The dependency whose runfiles we're appending. \"dep\": attr.label(mandatory = True),",
"layer across images, but requires a # symlink forest in",
"= select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\",",
"in excludes: container_pull( name = \"jetty_image_base\", registry = \"gcr.io\", repository",
"binary and then subtracting # out what it available. unavailable",
"compliance with the License. # You may obtain a copy",
"container_pull( name = \"java_image_base\", registry = \"gcr.io\", repository = \"distroless/java\",",
"in which it is participating. This can increase # sharing",
"what it available. unavailable = depset() for jar in ctx.attr.deps",
"digest = _JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\" not in excludes: container_pull(",
"_container = \"container\", ) def java_files(f): files = [] if",
"True), \"entrypoint\": attr.string_list(default = []), # Whether to lay out",
"def repositories(): # Call the core \"repositories\" function to reduce",
"not use this file except in compliance with the License.",
"with all remaining runfiles.\"\"\" available = depset() for jar in",
"= rule( attrs = dict(_container.image.attrs.items() + { # The binary",
"if java_common.provider in f: java_provider = f[java_common.provider] files += list(java_provider.transitive_runtime_jars)",
"you may not use this file except in compliance with",
"= base or DEFAULT_JETTY_BASE for index, dep in enumerate(layers): this_name",
"**kwargs): \"\"\"Builds a container image overlaying the java_library as an",
"file. \"_classpath_as_file\": attr.bool(default = False), # Override the defaults. \"directory\":",
"\"/usr/bin/java\", \"-cp\", # Support optionally passing the classpath as a",
"need to be able to ctrl-C it and have the",
"main class to invoke on startup. \"main_class\": attr.string(mandatory = True),",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"should be passed as a file. \"_classpath_as_file\": attr.bool(default = False),",
"a layer for a single dependency's runfiles.\"\"\" # TODO(mattmoor): Today",
"_JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\" not in excludes: container_pull( name =",
"binary in which it is participating. This can increase #",
"visibility, args = kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx): \"\"\"Appends a layer",
"in runtime_deps. We are # not allowed to pass deps",
"java_files) jar_dep_layer = rule( attrs = dict(_container.image.attrs.items() + { #",
"or DEFAULT_JAVA_BASE for index, dep in enumerate(layers): this_name = \"%s.%d\"",
"classpath = \":\".join([ layer_file_path(ctx, x) for x in available +",
"passed as a file. \"_classpath_as_file\": attr.bool(default = False), # Override",
"\"dep_layer_impl\", \"layer_file_path\", ) def _jar_dep_layer_impl(ctx): \"\"\"Appends a layer for a",
"+ layers) or None, runtime_deps = runtime_deps, jvm_flags = jvm_flags,",
"# The base image on which to overlay the dependency",
"overlay the dependency layers. \"base\": attr.label(mandatory = True), # The",
"main_class = None, deps = [], runtime_deps = [], layers",
"= \"java_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java\", digest =",
"= kwargs.get(\"tags\", None) _war_app_layer( name = name, base = base,",
"else classpath, ] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args file_map",
"# The library target for which we are synthesizing an",
"True), # Whether to lay out each dependency in a",
"in excludes: container_pull( name = \"java_image_base\", registry = \"gcr.io\", repository",
"# https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default = False),",
"= True), # Override the defaults. \"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"),",
"True), # The dependency whose runfiles we're appending. \"dep\": attr.label(mandatory",
"container image. The signature of java_image is compatible with java_binary.",
"\"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", }) load( \"//container:container.bzl\", _container = \"container\", )",
"# The full list of dependencies that have their own",
"transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor): Handle data files. # If",
"binary_name, main_class = main_class, # If the rule is turning",
"binary target for which we are synthesizing an image. \"binary\":",
"length of a # command line, so mitigate this by",
"runfiles we're appending. \"dep\": attr.label(mandatory = True), # Whether to",
"then subtracting # out what it available. unavailable = depset()",
"the dependency layers. \"base\": attr.label(mandatory = True), # The dependency",
"attr.bool(default = True), # Whether the classpath should be passed",
"unavailable ]) # Classpaths can grow long and there is",
"dependencies that should be put into their own layers. **kwargs:",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"load( \"//container:container.bzl\", \"container_pull\", _repositories = \"repositories\", ) # Load the",
"if x not in available] classpath = \":\".join([ layer_file_path(ctx, x)",
"artifact = \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\":",
"More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments \"deps\" with dependencies that",
"absolute paths. directory = \"/\", file_map = file_map, entrypoint =",
"useful, we need to be able to ctrl-C it and",
"True, outputs = _container.image.outputs, implementation = _war_dep_layer_impl, ) def _war_app_layer_impl(ctx):",
"adding in our binary and then subtracting # out what",
") load( \":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\", ) def repositories(): #",
"= _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): \"\"\"Appends the app layer with",
") def java_image( name, base = None, main_class = None,",
"file except in compliance with the License. # You may",
"{ layer_file_path(ctx, f): f for f in unavailable + [classpath_file]",
"files = files) _war_app_layer = rule( attrs = dict(_container.image.attrs.items() +",
"enumerate(layers): this_name = \"%s.%d\" % (name, index) _war_dep_layer(name = this_name,",
"\"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\"",
"= name, base = base, binary = binary_name, main_class =",
"= [], **kwargs): \"\"\"Builds a container image overlaying the java_binary.",
"[], **kwargs): \"\"\"Builds a container image overlaying the java_library as",
"If we start putting libs in servlet-agnostic paths, # then",
"_JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\" not in excludes: container_pull( name =",
"file instead. classpath_file = ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file, classpath) binary_path",
"there is a limit on the length of a #",
"is agnostic # of the binary in which it is",
"x in unavailable if x not in available] classpath =",
"ctx.attr.jar_layers: available += java_files(jar) # This is based on rules_appengine's",
"+= list(f.files) return files load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", ) def",
"to pass deps (even []) if there is no srcs",
"f): f for f in unavailable + [classpath_file] } return",
"registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"], )",
"this_name visibility = kwargs.get(\"visibility\", None) jar_app_layer( name = name, base",
"their own layers. **kwargs: See java_binary. \"\"\" binary_name = name",
"\"java_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"],",
"forest in the app layers. \"agnostic_dep_layout\": attr.bool(default = True), #",
"java_files(jar) # We compute the set of unavailable stuff by",
"line, so mitigate this by always writing the classpath out",
"\"@\" + classpath_path if ctx.attr._classpath_as_file else classpath, ] + ctx.attr.jvm_flags",
"jar in ctx.attr.deps + ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable +=",
"java_files(ctx.attr.binary) unavailable = [x for x in unavailable if x",
"for x in unavailable if x not in available] classpath",
"\"agnostic_dep_layout\": attr.bool(default = True), # Override the defaults. \"directory\": attr.string(default",
"attrs = dict(_container.image.attrs.items() + { # The binary target for",
"KIND, either express or implied. # See the License for",
"}.items()), executable = True, outputs = _container.image.outputs, implementation = _war_dep_layer_impl,",
"attr.label_list(), # The rest of the dependencies. \"deps\": attr.label_list(), \"runtime_deps\":",
"binary_name = name + \".binary\" native.java_binary( name = binary_name, main_class",
"then consider adding symlinks here. files = [d for d",
"excludes: container_pull( name = \"jetty_image_base\", registry = \"gcr.io\", repository =",
"exploded WAR. TODO(mattmoor): For `bazel run` of this to be",
"(the \"License\"); # you may not use this file except",
"same way, adding in our binary and then subtracting #",
"jvm_flags = jvm_flags, deps = deps, runtime_deps = runtime_deps, jar_layers",
"symlink forest in the app layers. \"agnostic_dep_layout\": attr.bool(default = True),",
"# in the same way, adding in our binary and",
"are synthesizing an image. \"binary\": attr.label(mandatory = True), # The",
"the length of a # command line, so mitigate this",
"hasattr(f, \"files\"): # a jar file files += list(f.files) return",
"# TODO(mattmoor): Today we run the risk of filenames colliding",
"(name, index) jar_dep_layer(name = this_name, base = base, dep =",
"# # Unless required by applicable law or agreed to",
"We use all absolute paths. directory = \"/\", file_map =",
"_container.image.implementation( ctx, files = java_files(ctx.attr.dep), ) _war_dep_layer = rule( attrs",
"their own layers. **kwargs: See java_library. \"\"\" library_name = name",
"runfiles = java_files) jar_dep_layer = rule( attrs = dict(_container.image.attrs.items() +",
"available. unavailable = depset() for jar in ctx.attr.deps + ctx.attr.runtime_deps:",
"\"library\": attr.label(mandatory = True), # The full list of dependencies",
"attr.string(mandatory = True), # Whether to lay out each dependency",
"WE WANT PATHS FLATTENED # \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\":",
"implied. # See the License for the specific language governing",
"have the container actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers:",
"of just flattening and using basename # we should use",
"Augments \"deps\" with dependencies that should be put into their",
"JAR built with java_library into # a binary, then it",
"ctx.attr.jar_layers: available += java_files(jar) # We compute the set of",
"attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(), # The base image on",
"digest = _JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\" not in excludes: container_pull(",
"of java_image is compatible with java_binary. The signature of war_image",
"\".\"), }.items()), executable = True, outputs = _container.image.outputs, implementation =",
"= deps + layers, **kwargs) base = base or DEFAULT_JETTY_BASE",
"basename # we should use a file_map based scheme. return",
"in our binary and then subtracting # out what it",
"in unavailable + [classpath_file] } return _container.image.implementation( ctx, # We",
"signature of java_image is compatible with java_binary. The signature of",
"[], layers = [], **kwargs): \"\"\"Builds a container image overlaying",
"on startup. \"main_class\": attr.string(mandatory = True), # Whether to lay",
"= _JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\" not in excludes: container_pull( name",
"args = kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx): \"\"\"Appends a layer for",
"\"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT PATHS FLATTENED #",
"\"jetty_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"],",
"\"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\":",
"name = \"java_image_base\", registry = \"gcr.io\", repository = \"distroless/java\", digest",
"actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments \"deps\" with",
"Unless required by applicable law or agreed to in writing,",
"= True), # Whether to lay out each dependency in",
"and then subtracting # out what it available. unavailable =",
"runtime_deps = runtime_deps, jar_layers = layers, visibility = visibility, args",
"the specific language governing permissions and # limitations under the",
"\"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\":",
"a # command line, so mitigate this by always writing",
"base. \"jar_layers\": attr.label_list(), # The base image on which to",
"invoke on startup. \"main_class\": attr.string(mandatory = True), # Whether to",
"across images, but requires a # symlink forest in the",
"jvm_flags = [], **kwargs): \"\"\"Builds a container image overlaying the",
"_container.image.outputs, implementation = _jar_app_layer_impl, ) def java_image( name, base =",
"= True, outputs = _container.image.outputs, implementation = _jar_dep_layer_impl, ) def",
"optionally passing the classpath as a file. \"@\" + classpath_path",
"visibility = kwargs.get(\"visibility\", None) tags = kwargs.get(\"tags\", None) _war_app_layer( name",
"java_files(ctx.attr.library) # TODO(mattmoor): Handle data files. # If we start",
"\"%s.%d\" % (name, index) _war_dep_layer(name = this_name, base = base,",
"risk of filenames colliding when # they get flattened. Instead",
"runtime_deps = runtime_deps, jvm_flags = jvm_flags, **kwargs ) base =",
"to lay out each dependency in a manner that is",
"main_class = main_class, jvm_flags = jvm_flags, deps = deps, runtime_deps",
"# out what it available. unavailable = depset() for jar",
"= None, deps = [], layers = [], **kwargs): \"\"\"Builds",
"idempotent if folks call it themselves. _repositories() excludes = native.existing_rules().keys()",
"a Java container image. The signature of java_image is compatible",
"call it themselves. _repositories() excludes = native.existing_rules().keys() if \"java_image_base\" not",
"the container actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments",
"= name + \".library\" native.java_library(name = library_name, deps = deps",
"container image overlaying the java_binary. Args: layers: Augments \"deps\" with",
"The main class to invoke on startup. \"main_class\": attr.string(mandatory =",
"index, dep in enumerate(layers): this_name = \"%s.%d\" % (name, index)",
"\"jvm_flags\": attr.string_list(), # The base image on which to overlay",
"**kwargs: See java_binary. \"\"\" binary_name = name + \".binary\" native.java_binary(",
"a file instead. classpath_file = ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file, classpath)",
"a file. \"@\" + classpath_path if ctx.attr._classpath_as_file else classpath, ]",
"= file_map, entrypoint = entrypoint, ) jar_app_layer = rule( attrs",
"= visibility, args = kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx): \"\"\"Appends a",
") if \"jetty_image_base\" not in excludes: container_pull( name = \"jetty_image_base\",",
"available = depset() for jar in ctx.attr.jar_layers: available += java_files(jar)",
"return _container.image.implementation( ctx, files = java_files(ctx.attr.dep), ) _war_dep_layer = rule(",
"depset() transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor): Handle data files. #",
"outputs = _container.image.outputs, implementation = _war_app_layer_impl, ) def war_image(name, base",
"= \"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\" not in",
"java_files(jar) # This is based on rules_appengine's WAR rules. transitive_deps",
"just flattening and using basename # we should use a",
"# This is based on rules_appengine's WAR rules. transitive_deps =",
"dep) base = this_name visibility = kwargs.get(\"visibility\", None) jar_app_layer( name",
"_JAVA_DIGESTS = \"DIGESTS\", ) load( \":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\", )",
"= (deps + layers) or None, runtime_deps = runtime_deps, jvm_flags",
"if ctx.attr._classpath_as_file else classpath, ] + ctx.attr.jvm_flags + [ctx.attr.main_class] +",
"layer_file_path(ctx, classpath_file) entrypoint = [ \"/usr/bin/java\", \"-cp\", # Support optionally",
"not allowed to pass deps (even []) if there is",
"rule( attrs = dict(_container.image.attrs.items() + { # The binary target",
"\"jar_layers\": attr.label_list(), # The rest of the dependencies. \"deps\": attr.label_list(),",
"_war_dep_layer = rule( attrs = dict(_container.image.attrs.items() + { # The",
"_war_dep_layer(name = this_name, base = base, dep = dep) base",
"in the app layers. \"agnostic_dep_layout\": attr.bool(default = True), # Whether",
"classpath_file = ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx,",
"Call the core \"repositories\" function to reduce boilerplate. # This",
"in excludes: container_pull( name = \"java_debug_image_base\", registry = \"gcr.io\", repository",
"= binary_name, main_class = main_class, # If the rule is",
"}) load( \"//container:container.bzl\", _container = \"container\", ) def java_files(f): files",
"repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\" not",
"a file. \"_classpath_as_file\": attr.bool(default = False), # Override the defaults.",
"not in excludes: container_pull( name = \"java_image_base\", registry = \"gcr.io\",",
"the resolved digests. load( \":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\", ) load(",
"You may obtain a copy of the License at #",
"this_name = \"%s.%d\" % (name, index) _war_dep_layer(name = this_name, base",
") if \"java_debug_image_base\" not in excludes: container_pull( name = \"java_debug_image_base\",",
"# command line, so mitigate this by always writing the",
"or None, runtime_deps = runtime_deps, jvm_flags = jvm_flags, **kwargs )",
"Copyright 2017 Google Inc. All rights reserved. # # Licensed",
"a container image overlaying the java_library as an exploded WAR.",
"\"repositories\", ) # Load the resolved digests. load( \":java.bzl\", _JAVA_DIGESTS",
"overlay the dependency layers. \"base\": attr.label(mandatory = True), \"entrypoint\": attr.string_list(default",
"jar_app_layer = rule( attrs = dict(_container.image.attrs.items() + { # The",
"container_pull( name = \"jetty_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\",",
"= base or DEFAULT_JAVA_BASE for index, dep in enumerate(layers): this_name",
"registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"], )",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"= kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx): \"\"\"Appends a layer for a",
"# The rest of the dependencies. \"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(),",
"jar_layers = layers, visibility = visibility, tags = tags, )",
"data files. # If we start putting libs in servlet-agnostic",
"layer_file_path(ctx, x) for x in available + unavailable ]) #",
"[], **kwargs): \"\"\"Builds a container image overlaying the java_binary. Args:",
"TODO(mattmoor): For `bazel run` of this to be useful, we",
"writing the classpath out # to a file instead. classpath_file",
"be able to ctrl-C it and have the container actually",
"for d in transitive_deps if d not in available] return",
"language governing permissions and # limitations under the License. \"\"\"A",
") base = base or DEFAULT_JAVA_BASE for index, dep in",
"None) tags = kwargs.get(\"tags\", None) _war_app_layer( name = name, base",
"to a file instead. classpath_file = ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file,",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"entrypoint = entrypoint, ) jar_app_layer = rule( attrs = dict(_container.image.attrs.items()",
"License. # You may obtain a copy of the License",
"single dependency's runfiles.\"\"\" # TODO(mattmoor): Today we run the risk",
"\"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE = select({",
"= _JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\" not in excludes: native.maven_jar( name",
"consider adding symlinks here. files = [d for d in",
"which it is participating. This can increase # sharing of",
"We compute the set of unavailable stuff by walking deps",
"\"binary\": attr.label(mandatory = True), # The full list of dependencies",
"name, base = base, binary = binary_name, main_class = main_class,",
"\"jetty_image_base\" not in excludes: container_pull( name = \"jetty_image_base\", registry =",
"\"data_path\": attr.string(default = \".\"), }.items()), executable = True, outputs =",
"walking deps # in the same way, adding in our",
"image overlaying the java_binary. Args: layers: Augments \"deps\" with dependencies",
"they get flattened. Instead of just flattening and using basename",
"\"jetty_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"],",
"get flattened. Instead of just flattening and using basename #",
"in enumerate(layers): this_name = \"%s.%d\" % (name, index) _war_dep_layer(name =",
"in a manner that is agnostic # of the binary",
"that have their own layers # factored into our base.",
"base image on which to overlay the dependency layers. \"base\":",
"jvm_flags = jvm_flags, **kwargs ) base = base or DEFAULT_JAVA_BASE",
"appear in runtime_deps. We are # not allowed to pass",
"run` of this to be useful, we need to be",
"the dependency's layer across images, but requires a # symlink",
"dependencies. \"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(), # The base",
"in the same way, adding in our binary and then",
"when # they get flattened. Instead of just flattening and",
"`bazel run` of this to be useful, we need to",
"\"\"\" library_name = name + \".library\" native.java_library(name = library_name, deps",
"base. \"jar_layers\": attr.label_list(), # The rest of the dependencies. \"deps\":",
"for jar in ctx.attr.jar_layers: available += java_files(jar) # We compute",
"# not allowed to pass deps (even []) if there",
"2017 Google Inc. All rights reserved. # # Licensed under",
"runfiles.\"\"\" return dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer = rule( attrs",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"it and have the container actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519",
"layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file) entrypoint = [ \"/usr/bin/java\",",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"turning a JAR built with java_library into # a binary,",
"\"java_debug_image_base\" not in excludes: container_pull( name = \"java_debug_image_base\", registry =",
"in available + unavailable ]) # Classpaths can grow long",
"= \":\".join([ layer_file_path(ctx, x) for x in available + unavailable",
"return dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer = rule( attrs =",
"[d for d in transitive_deps if d not in available]",
"def java_image( name, base = None, main_class = None, deps",
"required by applicable law or agreed to in writing, software",
"the License. \"\"\"A rule for creating a Java container image.",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"the core \"repositories\" function to reduce boilerplate. # This is",
"name = \"jetty_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest",
"and # limitations under the License. \"\"\"A rule for creating",
"}) DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\",",
"reduce boilerplate. # This is idempotent if folks call it",
"unavailable += java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable = [x for",
"= _JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\" not in excludes: container_pull( name",
"core \"repositories\" function to reduce boilerplate. # This is idempotent",
"agreed to in writing, software # distributed under the License",
"\"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", ) def _jar_dep_layer_impl(ctx): \"\"\"Appends a layer for",
"dependency in a manner that is agnostic # of the",
"distributed under the License is distributed on an \"AS IS\"",
"kwargs.get(\"visibility\", None) jar_app_layer( name = name, base = base, binary",
"**kwargs) base = base or DEFAULT_JETTY_BASE for index, dep in",
"war_image(name, base = None, deps = [], layers = [],",
"repositories(): # Call the core \"repositories\" function to reduce boilerplate.",
"adding symlinks here. files = [d for d in transitive_deps",
"rule is turning a JAR built with java_library into #",
"this to be useful, we need to be able to",
"{ # The binary target for which we are synthesizing",
"= this_name visibility = kwargs.get(\"visibility\", None) tags = kwargs.get(\"tags\", None)",
"= True), # The dependency whose runfiles we're appending. \"dep\":",
"of the binary in which it is participating. This can",
"\"\"\" load( \"//container:container.bzl\", \"container_pull\", _repositories = \"repositories\", ) # Load",
"are synthesizing an image. \"library\": attr.label(mandatory = True), # The",
"transitive_deps = depset() transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor): Handle data",
") def _jar_dep_layer_impl(ctx): \"\"\"Appends a layer for a single dependency's",
"= True, outputs = _container.image.outputs, implementation = _jar_app_layer_impl, ) def",
"} return _container.image.implementation( ctx, # We use all absolute paths.",
"= kwargs.get(\"visibility\", None) tags = kwargs.get(\"tags\", None) _war_app_layer( name =",
"sharing of the dependency's layer across images, but requires a",
"layers = [], jvm_flags = [], **kwargs): \"\"\"Builds a container",
"registry = \"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"], )",
"# Load the resolved digests. load( \":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\",",
"The signature of war_image is compatible with java_library. \"\"\" load(",
"If the rule is turning a JAR built with java_library",
"on rules_appengine's WAR rules. transitive_deps = depset() transitive_deps += java_files(ctx.attr.library)",
"available] return _container.image.implementation(ctx, files = files) _war_app_layer = rule( attrs",
"\"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\",",
"return _container.image.implementation( ctx, # We use all absolute paths. directory",
"\"container\", ) def java_files(f): files = [] if java_common.provider in",
"kwarg. deps = (deps + layers) or None, runtime_deps =",
"app layer with all remaining runfiles.\"\"\" available = depset() for",
"\"java_image_base\" not in excludes: container_pull( name = \"java_image_base\", registry =",
"and have the container actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args:",
"app layers. \"agnostic_dep_layout\": attr.bool(default = True), # Override the defaults.",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"dict(_container.image.attrs.items() + { # The binary target for which we",
"= dict(_container.image.attrs.items() + { # The library target for which",
"the License is distributed on an \"AS IS\" BASIS, #",
"= True), # Whether the classpath should be passed as",
"\"jetty_debug_image_base\" not in excludes: container_pull( name = \"jetty_debug_image_base\", registry =",
"# to a file instead. classpath_file = ctx.new_file(ctx.attr.name + \".classpath\")",
"attr.label(mandatory = True), # The full list of dependencies that",
"in transitive_deps if d not in available] return _container.image.implementation(ctx, files",
"tags = kwargs.get(\"tags\", None) _war_app_layer( name = name, base =",
"classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file) entrypoint",
"enumerate(layers): this_name = \"%s.%d\" % (name, index) jar_dep_layer(name = this_name,",
"list(f.files) return files load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", ) def _jar_dep_layer_impl(ctx):",
"= [d for d in transitive_deps if d not in",
"_JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\" not in excludes: container_pull( name =",
"law or agreed to in writing, software # distributed under",
"name + \".binary\" native.java_binary( name = binary_name, main_class = main_class,",
"run the risk of filenames colliding when # they get",
"we are synthesizing an image. \"binary\": attr.label(mandatory = True), #",
"layers. \"base\": attr.label(mandatory = True), # The main class to",
"base, dep = dep) base = this_name visibility = kwargs.get(\"visibility\",",
"The rest of the dependencies. \"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\":",
"ctx, # We use all absolute paths. directory = \"/\",",
"boilerplate. # This is idempotent if folks call it themselves.",
"+= list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"): # a jar file files",
"put into their own layers. **kwargs: See java_library. \"\"\" library_name",
"Whether the classpath should be passed as a file. \"_classpath_as_file\":",
"True), # Override the defaults. \"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), #",
"may obtain a copy of the License at # #",
"def _jar_dep_layer_impl(ctx): \"\"\"Appends a layer for a single dependency's runfiles.\"\"\"",
"DEFAULT_JAVA_BASE for index, dep in enumerate(layers): this_name = \"%s.%d\" %",
"repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\" not",
"may not use this file except in compliance with the",
"= java_files(ctx.attr.dep), ) _war_dep_layer = rule( attrs = dict(_container.image.attrs.items() +",
"d in transitive_deps if d not in available] return _container.image.implementation(ctx,",
"compatible with java_binary. The signature of war_image is compatible with",
"way, adding in our binary and then subtracting # out",
"= deps, runtime_deps = runtime_deps, jar_layers = layers, visibility =",
"long and there is a limit on the length of",
"this file except in compliance with the License. # You",
"= jvm_flags, **kwargs ) base = base or DEFAULT_JAVA_BASE for",
"# # Licensed under the Apache License, Version 2.0 (the",
"Google Inc. All rights reserved. # # Licensed under the",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"WAR. TODO(mattmoor): For `bazel run` of this to be useful,",
"= \"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"], ) if",
"None, runtime_deps = runtime_deps, jvm_flags = jvm_flags, **kwargs ) base",
"= base, dep = dep) base = this_name visibility =",
"WANT PATHS FLATTENED # \"data_path\": attr.string(default = \".\"), }.items()), executable",
"attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), }.items()),",
"of filenames colliding when # they get flattened. Instead of",
"an image. \"binary\": attr.label(mandatory = True), # The full list",
"jar in ctx.attr.jar_layers: available += java_files(jar) # This is based",
"rules_appengine's WAR rules. transitive_deps = depset() transitive_deps += java_files(ctx.attr.library) #",
"on the length of a # command line, so mitigate",
"attrs = dict(_container.image.attrs.items() + { # The library target for",
"_jar_app_layer_impl(ctx): \"\"\"Appends the app layer with all remaining runfiles.\"\"\" available",
"= _container.image.outputs, implementation = _war_app_layer_impl, ) def war_image(name, base =",
"it available. unavailable = depset() for jar in ctx.attr.deps +",
"excludes = native.existing_rules().keys() if \"java_image_base\" not in excludes: container_pull( name",
"attr.label(mandatory = True), # Whether to lay out each dependency",
"[x for x in unavailable if x not in available]",
"FLATTENED # \"data_path\": attr.string(default = \".\"), }.items()), executable = True,",
"\"jar_layers\": attr.label_list(), # The base image on which to overlay",
"or implied. # See the License for the specific language",
"requires a # symlink forest in the app layers. \"agnostic_dep_layout\":",
"container image overlaying the java_library as an exploded WAR. TODO(mattmoor):",
"# Override the defaults. \"directory\": attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176",
"Load the resolved digests. load( \":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\", )",
"_container.image.outputs, implementation = _war_dep_layer_impl, ) def _war_app_layer_impl(ctx): \"\"\"Appends the app",
"library target for which we are synthesizing an image. \"library\":",
"function to reduce boilerplate. # This is idempotent if folks",
") def java_files(f): files = [] if java_common.provider in f:",
"= runtime_deps, jvm_flags = jvm_flags, **kwargs ) base = base",
"\"/\", file_map = file_map, entrypoint = entrypoint, ) jar_app_layer =",
"= \".\"), \"legacy_run_behavior\": attr.bool(default = False), }.items()), executable = True,",
"= _container.image.outputs, implementation = _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): \"\"\"Appends the",
"+ ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args file_map = { layer_file_path(ctx,",
"signature of war_image is compatible with java_library. \"\"\" load( \"//container:container.bzl\",",
"The full list of dependencies that have their own layers",
"binary = binary_name, main_class = main_class, jvm_flags = jvm_flags, deps",
"the classpath as a file. \"@\" + classpath_path if ctx.attr._classpath_as_file",
"False), }.items()), executable = True, outputs = _container.image.outputs, implementation =",
"so mitigate this by always writing the classpath out #",
"class to invoke on startup. \"main_class\": attr.string(mandatory = True), #",
"deps = [], runtime_deps = [], layers = [], jvm_flags",
"= \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\" not in",
"library = library_name, jar_layers = layers, visibility = visibility, tags",
"if \"jetty_image_base\" not in excludes: container_pull( name = \"jetty_image_base\", registry",
"= binary_name, main_class = main_class, jvm_flags = jvm_flags, deps =",
"a container image overlaying the java_binary. Args: layers: Augments \"deps\"",
"\"base\": attr.label(mandatory = True), # The dependency whose runfiles we're",
"as an exploded WAR. TODO(mattmoor): For `bazel run` of this",
"https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments \"deps\" with dependencies that should be",
"This can increase # sharing of the dependency's layer across",
"= True), # The main class to invoke on startup.",
"All rights reserved. # # Licensed under the Apache License,",
"layers. \"base\": attr.label(mandatory = True), \"entrypoint\": attr.string_list(default = []), #",
"image overlaying the java_library as an exploded WAR. TODO(mattmoor): For",
"True), # The full list of dependencies that have their",
"base = this_name visibility = kwargs.get(\"visibility\", None) tags = kwargs.get(\"tags\",",
"out what it available. unavailable = depset() for jar in",
"entrypoint, ) jar_app_layer = rule( attrs = dict(_container.image.attrs.items() + {",
"of unavailable stuff by walking deps # in the same",
"files += list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"): # a jar file",
"DEFAULT_JETTY_BASE for index, dep in enumerate(layers): this_name = \"%s.%d\" %",
"base = base or DEFAULT_JAVA_BASE for index, dep in enumerate(layers):",
"java_files(ctx.attr.dep), ) _war_dep_layer = rule( attrs = dict(_container.image.attrs.items() + {",
"name + \".library\" native.java_library(name = library_name, deps = deps +",
") def _war_app_layer_impl(ctx): \"\"\"Appends the app layer with all remaining",
"= this_name visibility = kwargs.get(\"visibility\", None) jar_app_layer( name = name,",
"in servlet-agnostic paths, # then consider adding symlinks here. files",
"_jar_app_layer_impl, ) def java_image( name, base = None, main_class =",
"= depset() for jar in ctx.attr.deps + ctx.attr.runtime_deps: unavailable +=",
"to overlay the dependency layers. \"base\": attr.label(mandatory = True), #",
"\"\"\"Appends a layer for a single dependency's runfiles.\"\"\" # TODO(mattmoor):",
"in writing, software # distributed under the License is distributed",
"attr.bool(default = False), }.items()), executable = True, outputs = _container.image.outputs,",
"native.java_library(name = library_name, deps = deps + layers, **kwargs) base",
"into their own layers. **kwargs: See java_library. \"\"\" library_name =",
"layers, visibility = visibility, args = kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx):",
"classpath as a file. \"@\" + classpath_path if ctx.attr._classpath_as_file else",
"it will appear in runtime_deps. We are # not allowed",
"# Override the defaults. \"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"License, Version 2.0 (the \"License\"); # you may not use",
"\".\"), \"legacy_run_behavior\": attr.bool(default = False), }.items()), executable = True, outputs",
"if hasattr(f, \"files\"): # a jar file files += list(f.files)",
"depset() for jar in ctx.attr.jar_layers: available += java_files(jar) # This",
"outputs = _container.image.outputs, implementation = _war_dep_layer_impl, ) def _war_app_layer_impl(ctx): \"\"\"Appends",
"= jvm_flags, deps = deps, runtime_deps = runtime_deps, jar_layers =",
"the risk of filenames colliding when # they get flattened.",
"the License for the specific language governing permissions and #",
"with java_library into # a binary, then it will appear",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"executable = True, outputs = _container.image.outputs, implementation = _war_app_layer_impl, )",
"libs in servlet-agnostic paths, # then consider adding symlinks here.",
"_war_app_layer( name = name, base = base, library = library_name,",
"jvm_flags, deps = deps, runtime_deps = runtime_deps, jar_layers = layers,",
"_JETTY_DIGESTS = \"DIGESTS\", ) def repositories(): # Call the core",
"jar in ctx.attr.jar_layers: available += java_files(jar) # We compute the",
"put into their own layers. **kwargs: See java_binary. \"\"\" binary_name",
"False), # Override the defaults. \"directory\": attr.string(default = \"/app\"), #",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"+ { # The base image on which to overlay",
"\"-cp\", # Support optionally passing the classpath as a file.",
"= [], layers = [], jvm_flags = [], **kwargs): \"\"\"Builds",
"See java_binary. \"\"\" binary_name = name + \".binary\" native.java_binary( name",
"not in available] return _container.image.implementation(ctx, files = files) _war_app_layer =",
"kwargs.get(\"visibility\", None) tags = kwargs.get(\"tags\", None) _war_app_layer( name = name,",
"+= java_files(jar) # This is based on rules_appengine's WAR rules.",
"the rule is turning a JAR built with java_library into",
"depset() for jar in ctx.attr.jar_layers: available += java_files(jar) # We",
"that should be put into their own layers. **kwargs: See",
"def _war_dep_layer_impl(ctx): \"\"\"Appends a layer for a single dependency's runfiles.\"\"\"",
"ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file)",
"# distributed under the License is distributed on an \"AS",
"be put into their own layers. **kwargs: See java_library. \"\"\"",
"# Unless required by applicable law or agreed to in",
"whose runfiles we're appending. \"dep\": attr.label(mandatory = True), # Whether",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"\"dep\": attr.label(mandatory = True), # Whether to lay out each",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"use all absolute paths. directory = \"/\", file_map = file_map,",
"DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\":",
"for f in unavailable + [classpath_file] } return _container.image.implementation( ctx,",
"image. The signature of java_image is compatible with java_binary. The",
"classpath_file) entrypoint = [ \"/usr/bin/java\", \"-cp\", # Support optionally passing",
"file. \"@\" + classpath_path if ctx.attr._classpath_as_file else classpath, ] +",
"The binary target for which we are synthesizing an image.",
"= layers, visibility = visibility, args = kwargs.get(\"args\"), ) def",
"= layer_file_path(ctx, classpath_file) entrypoint = [ \"/usr/bin/java\", \"-cp\", # Support",
"ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable = [x",
"# factored into our base. \"jar_layers\": attr.label_list(), # The rest",
"implementation = _war_app_layer_impl, ) def war_image(name, base = None, deps",
") def war_image(name, base = None, deps = [], layers",
"\"//container:container.bzl\", _container = \"container\", ) def java_files(f): files = []",
"= True, outputs = _container.image.outputs, implementation = _war_dep_layer_impl, ) def",
"= None, main_class = None, deps = [], runtime_deps =",
"def war_image(name, base = None, deps = [], layers =",
"\"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", }) load(",
"list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"): # a jar file files +=",
"file_map = file_map, entrypoint = entrypoint, ) jar_app_layer = rule(",
"+ [ctx.attr.main_class] + ctx.attr.args file_map = { layer_file_path(ctx, f): f",
"= dict(_container.image.attrs.items() + { # The base image on which",
"all remaining runfiles.\"\"\" available = depset() for jar in ctx.attr.jar_layers:",
"= java_files) jar_dep_layer = rule( attrs = dict(_container.image.attrs.items() + {",
"\"deps\" with dependencies that should be put into their own",
"= \"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\" not in",
"library_name, jar_layers = layers, visibility = visibility, tags = tags,",
"# they get flattened. Instead of just flattening and using",
"a # symlink forest in the app layers. \"agnostic_dep_layout\": attr.bool(default",
"\"main_class\": attr.string(mandatory = True), # Whether to lay out each",
"servlet-agnostic paths, # then consider adding symlinks here. files =",
"TODO(mattmoor): Today we run the risk of filenames colliding when",
"our base. \"jar_layers\": attr.label_list(), # The base image on which",
"under the License is distributed on an \"AS IS\" BASIS,",
"base = this_name visibility = kwargs.get(\"visibility\", None) jar_app_layer( name =",
"each dependency in a manner that is agnostic # of",
"is compatible with java_binary. The signature of war_image is compatible",
"lay out each dependency in a manner that is agnostic",
"= runtime_deps, jar_layers = layers, visibility = visibility, args =",
"= dep) base = this_name visibility = kwargs.get(\"visibility\", None) tags",
"load( \"//container:container.bzl\", _container = \"container\", ) def java_files(f): files =",
"attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT PATHS FLATTENED # \"data_path\":",
"synthesizing an image. \"binary\": attr.label(mandatory = True), # The full",
"can grow long and there is a limit on the",
"runtime_deps = [], layers = [], jvm_flags = [], **kwargs):",
"target for which we are synthesizing an image. \"library\": attr.label(mandatory",
"unavailable if x not in available] classpath = \":\".join([ layer_file_path(ctx,",
"= _jar_app_layer_impl, ) def java_image( name, base = None, main_class",
"dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer = rule( attrs = dict(_container.image.attrs.items()",
"terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments \"deps\" with dependencies",
"\"gcr.io\", repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\"",
"= [ \"/usr/bin/java\", \"-cp\", # Support optionally passing the classpath",
"into our base. \"jar_layers\": attr.label_list(), # The base image on",
"layers, **kwargs) base = base or DEFAULT_JETTY_BASE for index, dep",
"https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default = False), }.items()),",
"deps = deps, runtime_deps = runtime_deps, jar_layers = layers, visibility",
"\"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", }) load( \"//container:container.bzl\", _container",
"\"\"\"Appends the app layer with all remaining runfiles.\"\"\" available =",
"]) # Classpaths can grow long and there is a",
"\"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(), # The base image",
"rest of the dependencies. \"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(),",
"visibility = kwargs.get(\"visibility\", None) jar_app_layer( name = name, base =",
"out each dependency in a manner that is agnostic #",
"ANY KIND, either express or implied. # See the License",
"the License. # You may obtain a copy of the",
"\"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\"",
"_container.image.implementation( ctx, # We use all absolute paths. directory =",
"\"\"\"A rule for creating a Java container image. The signature",
"excludes: container_pull( name = \"java_image_base\", registry = \"gcr.io\", repository =",
"# See the License for the specific language governing permissions",
"load( \":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\", ) def repositories(): # Call",
"= \"jetty_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest =",
"it is participating. This can increase # sharing of the",
"None, deps = [], runtime_deps = [], layers = [],",
"(name, index) _war_dep_layer(name = this_name, base = base, dep =",
"dependency layers. \"base\": attr.label(mandatory = True), # The dependency whose",
"load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", ) def _jar_dep_layer_impl(ctx): \"\"\"Appends a layer",
"[ \"/usr/bin/java\", \"-cp\", # Support optionally passing the classpath as",
"+ ctx.attr.args file_map = { layer_file_path(ctx, f): f for f",
"\":\".join([ layer_file_path(ctx, x) for x in available + unavailable ])",
"DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\":",
"runfiles.\"\"\" available = depset() for jar in ctx.attr.jar_layers: available +=",
"+ \".binary\" native.java_binary( name = binary_name, main_class = main_class, #",
"= \"%s.%d\" % (name, index) jar_dep_layer(name = this_name, base =",
"= \"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"], ) if",
"but requires a # symlink forest in the app layers.",
"= \"repositories\", ) # Load the resolved digests. load( \":java.bzl\",",
"outputs = _container.image.outputs, implementation = _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): \"\"\"Appends",
"= _war_app_layer_impl, ) def war_image(name, base = None, deps =",
"ctx.attr._classpath_as_file else classpath, ] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args",
"is based on rules_appengine's WAR rules. transitive_deps = depset() transitive_deps",
"base = base, dep = dep) base = this_name visibility",
"TODO(mattmoor): Handle data files. # If we start putting libs",
"# \"data_path\": attr.string(default = \".\"), }.items()), executable = True, outputs",
"in f: java_provider = f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if hasattr(f,",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"= dep) base = this_name visibility = kwargs.get(\"visibility\", None) jar_app_layer(",
"layer with all remaining runfiles.\"\"\" available = depset() for jar",
"rule for creating a Java container image. The signature of",
"writing, software # distributed under the License is distributed on",
"not in excludes: native.maven_jar( name = \"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\",",
"information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments \"deps\" with dependencies that should",
"\"repositories\" function to reduce boilerplate. # This is idempotent if",
"**kwargs ) base = base or DEFAULT_JAVA_BASE for index, dep",
"# we should use a file_map based scheme. return _container.image.implementation(",
"load( \":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\", ) load( \":jetty.bzl\", _JETTY_DIGESTS =",
"in ctx.attr.jar_layers: available += java_files(jar) # We compute the set",
"overlaying the java_library as an exploded WAR. TODO(mattmoor): For `bazel",
"_war_app_layer_impl, ) def war_image(name, base = None, deps = [],",
"attr.bool(default = False), # Override the defaults. \"directory\": attr.string(default =",
"ctrl-C it and have the container actually terminate. More information:",
"in the app layers. \"agnostic_dep_layout\": attr.bool(default = True), # Override",
"in unavailable if x not in available] classpath = \":\".join([",
"layers. \"agnostic_dep_layout\": attr.bool(default = True), # Whether the classpath should",
"a file_map based scheme. return _container.image.implementation( ctx, files = java_files(ctx.attr.dep),",
"= _war_dep_layer_impl, ) def _war_app_layer_impl(ctx): \"\"\"Appends the app layer with",
"+ \".classpath\") ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path =",
"# We compute the set of unavailable stuff by walking",
"\"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\" not in excludes:",
"}.items()), executable = True, outputs = _container.image.outputs, implementation = _jar_dep_layer_impl,",
"\"container_pull\", _repositories = \"repositories\", ) # Load the resolved digests.",
"attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\":",
"java_library into # a binary, then it will appear in",
"FLATTENED # \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default = False),",
"base = base, binary = binary_name, main_class = main_class, jvm_flags",
"repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\" not",
"# kwarg. deps = (deps + layers) or None, runtime_deps",
") jar_app_layer = rule( attrs = dict(_container.image.attrs.items() + { #",
"native.maven_jar( name = \"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE =",
"colliding when # they get flattened. Instead of just flattening",
"f: java_provider = f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"):",
"= dict(_container.image.attrs.items() + { # The binary target for which",
"to ctrl-C it and have the container actually terminate. More",
"layers: Augments \"deps\" with dependencies that should be put into",
"the java_library as an exploded WAR. TODO(mattmoor): For `bazel run`",
"= ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0])",
"base = None, main_class = None, deps = [], runtime_deps",
"[], jvm_flags = [], **kwargs): \"\"\"Builds a container image overlaying",
"jvm_flags, **kwargs ) base = base or DEFAULT_JAVA_BASE for index,",
") # Load the resolved digests. load( \":java.bzl\", _JAVA_DIGESTS =",
"jar_app_layer( name = name, base = base, binary = binary_name,",
") def repositories(): # Call the core \"repositories\" function to",
"\"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default =",
"attr.label_list(), \"jvm_flags\": attr.string_list(), # The base image on which to",
"_war_dep_layer_impl, ) def _war_app_layer_impl(ctx): \"\"\"Appends the app layer with all",
"= main_class, # If the rule is turning a JAR",
"have their own layers # factored into our base. \"jar_layers\":",
"# TODO(mattmoor): Handle data files. # If we start putting",
"+ [classpath_file] } return _container.image.implementation( ctx, # We use all",
"not in available] classpath = \":\".join([ layer_file_path(ctx, x) for x",
"is idempotent if folks call it themselves. _repositories() excludes =",
"= \"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\":",
"\":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\", ) def repositories(): # Call the",
"our binary and then subtracting # out what it available.",
"passing the classpath as a file. \"@\" + classpath_path if",
"[]), # Whether to lay out each dependency in a",
"if \"servlet_api\" not in excludes: native.maven_jar( name = \"javax_servlet_api\", artifact",
"java_image is compatible with java_binary. The signature of war_image is",
"\"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT PATHS FLATTENED # \"data_path\": attr.string(default =",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"= this_name, base = base, dep = dep) base =",
"name = \"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE = select({",
"there is no srcs # kwarg. deps = (deps +",
"= library_name, deps = deps + layers, **kwargs) base =",
"factored into our base. \"jar_layers\": attr.label_list(), # The rest of",
"index) jar_dep_layer(name = this_name, base = base, dep = dep)",
"layer for a single dependency's runfiles.\"\"\" return dep_layer_impl(ctx, runfiles =",
"_war_dep_layer_impl(ctx): \"\"\"Appends a layer for a single dependency's runfiles.\"\"\" #",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"+ \".library\" native.java_library(name = library_name, deps = deps + layers,",
"java_library as an exploded WAR. TODO(mattmoor): For `bazel run` of",
"paths. directory = \"/\", file_map = file_map, entrypoint = entrypoint,",
"classpath, ] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args file_map =",
"# of the binary in which it is participating. This",
"own layers # factored into our base. \"jar_layers\": attr.label_list(), #",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"rules. transitive_deps = depset() transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor): Handle",
"the dependencies. \"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(), # The",
"Support optionally passing the classpath as a file. \"@\" +",
"unavailable = [x for x in unavailable if x not",
"\".classpath\") ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx,",
"+ { # The binary target for which we are",
"rights reserved. # # Licensed under the Apache License, Version",
"defaults. \"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT PATHS FLATTENED",
"to overlay the dependency layers. \"base\": attr.label(mandatory = True), \"entrypoint\":",
"\"servlet_api\" not in excludes: native.maven_jar( name = \"javax_servlet_api\", artifact =",
"to reduce boilerplate. # This is idempotent if folks call",
"\"legacy_run_behavior\": attr.bool(default = False), }.items()), executable = True, outputs =",
"own layers. **kwargs: See java_binary. \"\"\" binary_name = name +",
"a binary, then it will appear in runtime_deps. We are",
"not in excludes: container_pull( name = \"java_debug_image_base\", registry = \"gcr.io\",",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"outputs = _container.image.outputs, implementation = _jar_app_layer_impl, ) def java_image( name,",
"(even []) if there is no srcs # kwarg. deps",
"of war_image is compatible with java_library. \"\"\" load( \"//container:container.bzl\", \"container_pull\",",
"available += java_files(jar) # We compute the set of unavailable",
"[]) if there is no srcs # kwarg. deps =",
"# a jar file files += list(f.files) return files load(",
"specific language governing permissions and # limitations under the License.",
"we should use a file_map based scheme. return _container.image.implementation( ctx,",
"% (name, index) jar_dep_layer(name = this_name, base = base, dep",
"name, base = None, main_class = None, deps = [],",
"creating a Java container image. The signature of java_image is",
"registry = \"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"], )",
"The base image on which to overlay the dependency layers.",
"as a file. \"@\" + classpath_path if ctx.attr._classpath_as_file else classpath,",
"deps, runtime_deps = runtime_deps, jar_layers = layers, visibility = visibility,",
"implementation = _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): \"\"\"Appends the app layer",
"if \"jetty_debug_image_base\" not in excludes: container_pull( name = \"jetty_debug_image_base\", registry",
"# you may not use this file except in compliance",
"True), # Override the defaults. \"directory\": attr.string(default = \"/app\"), #",
"= { layer_file_path(ctx, f): f for f in unavailable +",
"\"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\",",
"with dependencies that should be put into their own layers.",
"f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"): # a jar",
"name, base = base, library = library_name, jar_layers = layers,",
"available] classpath = \":\".join([ layer_file_path(ctx, x) for x in available",
"= files) _war_app_layer = rule( attrs = dict(_container.image.attrs.items() + {",
"war_image is compatible with java_library. \"\"\" load( \"//container:container.bzl\", \"container_pull\", _repositories",
"unavailable = depset() for jar in ctx.attr.deps + ctx.attr.runtime_deps: unavailable",
"we need to be able to ctrl-C it and have",
"= f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"): # a",
"dict(_container.image.attrs.items() + { # The base image on which to",
"a single dependency's runfiles.\"\"\" # TODO(mattmoor): Today we run the",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"in excludes: native.maven_jar( name = \"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\", )",
"is a limit on the length of a # command",
"we run the risk of filenames colliding when # they",
"True), # Whether the classpath should be passed as a",
"_war_app_layer = rule( attrs = dict(_container.image.attrs.items() + { # The",
"not in excludes: container_pull( name = \"jetty_debug_image_base\", registry = \"gcr.io\",",
"f for f in unavailable + [classpath_file] } return _container.image.implementation(",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"# We use all absolute paths. directory = \"/\", file_map",
"transitive_deps if d not in available] return _container.image.implementation(ctx, files =",
"the dependency layers. \"base\": attr.label(mandatory = True), # The main",
"app layers. \"agnostic_dep_layout\": attr.bool(default = True), # Whether the classpath",
"base, binary = binary_name, main_class = main_class, jvm_flags = jvm_flags,",
"flattened. Instead of just flattening and using basename # we",
"always writing the classpath out # to a file instead.",
"= \".\"), }.items()), executable = True, outputs = _container.image.outputs, implementation",
"is compatible with java_library. \"\"\" load( \"//container:container.bzl\", \"container_pull\", _repositories =",
"The library target for which we are synthesizing an image.",
"or DEFAULT_JETTY_BASE for index, dep in enumerate(layers): this_name = \"%s.%d\"",
"= [] if java_common.provider in f: java_provider = f[java_common.provider] files",
"\"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), }.items()), executable =",
"for a single dependency's runfiles.\"\"\" return dep_layer_impl(ctx, runfiles = java_files)",
"if folks call it themselves. _repositories() excludes = native.existing_rules().keys() if",
"deps = deps + layers, **kwargs) base = base or",
"binary_name, main_class = main_class, jvm_flags = jvm_flags, deps = deps,",
"_repositories = \"repositories\", ) # Load the resolved digests. load(",
"= name, base = base, library = library_name, jar_layers =",
"= library_name, jar_layers = layers, visibility = visibility, tags =",
"# The main class to invoke on startup. \"main_class\": attr.string(mandatory",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"implementation = _jar_app_layer_impl, ) def java_image( name, base = None,",
"\"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\" not in excludes:",
"full list of dependencies that have their own layers #",
"base = None, deps = [], layers = [], **kwargs):",
"= kwargs.get(\"visibility\", None) jar_app_layer( name = name, base = base,",
"_repositories() excludes = native.existing_rules().keys() if \"java_image_base\" not in excludes: container_pull(",
"+= java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable = [x for x",
"file files += list(f.files) return files load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\",",
"by walking deps # in the same way, adding in",
"based scheme. return _container.image.implementation( ctx, files = java_files(ctx.attr.dep), ) _war_dep_layer",
"\"files\"): # a jar file files += list(f.files) return files",
"select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", })",
"deps = (deps + layers) or None, runtime_deps = runtime_deps,",
"WANT PATHS FLATTENED # \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default",
"= None, deps = [], runtime_deps = [], layers =",
"= [], **kwargs): \"\"\"Builds a container image overlaying the java_library",
"java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable = [x for x in",
"the app layer with all remaining runfiles.\"\"\" available = depset()",
"executable = True, outputs = _container.image.outputs, implementation = _jar_app_layer_impl, )",
"kwargs.get(\"args\"), ) def _war_dep_layer_impl(ctx): \"\"\"Appends a layer for a single",
"\".library\" native.java_library(name = library_name, deps = deps + layers, **kwargs)",
"stuff by walking deps # in the same way, adding",
"runtime_deps, jar_layers = layers, visibility = visibility, args = kwargs.get(\"args\"),",
"should use a file_map based scheme. return _container.image.implementation( ctx, files",
"def _war_app_layer_impl(ctx): \"\"\"Appends the app layer with all remaining runfiles.\"\"\"",
") def _war_dep_layer_impl(ctx): \"\"\"Appends a layer for a single dependency's",
"index) _war_dep_layer(name = this_name, base = base, dep = dep)",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"into their own layers. **kwargs: See java_binary. \"\"\" binary_name =",
"factored into our base. \"jar_layers\": attr.label_list(), # The base image",
"main_class, jvm_flags = jvm_flags, deps = deps, runtime_deps = runtime_deps,",
"which to overlay the dependency layers. \"base\": attr.label(mandatory = True),",
"= True, outputs = _container.image.outputs, implementation = _war_app_layer_impl, ) def",
"\"layer_file_path\", ) def _jar_dep_layer_impl(ctx): \"\"\"Appends a layer for a single",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"either express or implied. # See the License for the",
"# sharing of the dependency's layer across images, but requires",
"of the dependency's layer across images, but requires a #",
"the same way, adding in our binary and then subtracting",
"= \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\",",
"# WE WANT PATHS FLATTENED # \"data_path\": attr.string(default = \".\"),",
"rule( attrs = dict(_container.image.attrs.items() + { # The base image",
"java_binary. Args: layers: Augments \"deps\" with dependencies that should be",
"}.items()), executable = True, outputs = _container.image.outputs, implementation = _war_app_layer_impl,",
"files. # If we start putting libs in servlet-agnostic paths,",
"layers) or None, runtime_deps = runtime_deps, jvm_flags = jvm_flags, **kwargs",
"= depset() for jar in ctx.attr.jar_layers: available += java_files(jar) #",
"# then consider adding symlinks here. files = [d for",
"available + unavailable ]) # Classpaths can grow long and",
"files = java_files(ctx.attr.dep), ) _war_dep_layer = rule( attrs = dict(_container.image.attrs.items()",
"repository = \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\" not",
"scheme. return _container.image.implementation( ctx, files = java_files(ctx.attr.dep), ) _war_dep_layer =",
"name = \"java_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java\", digest",
"_container.image.implementation(ctx, files = files) _war_app_layer = rule( attrs = dict(_container.image.attrs.items()",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"java_common.provider in f: java_provider = f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if",
"classpath should be passed as a file. \"_classpath_as_file\": attr.bool(default =",
"target for which we are synthesizing an image. \"binary\": attr.label(mandatory",
"this_name, base = base, dep = dep) base = this_name",
"in available] return _container.image.implementation(ctx, files = files) _war_app_layer = rule(",
"\"//conditions:default\": \"@jetty_image_base//image\", }) load( \"//container:container.bzl\", _container = \"container\", ) def",
"overlaying the java_binary. Args: layers: Augments \"deps\" with dependencies that",
"\":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\", ) load( \":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\",",
"we're appending. \"dep\": attr.label(mandatory = True), # Whether to lay",
"_jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): \"\"\"Appends the app layer with all",
"the classpath should be passed as a file. \"_classpath_as_file\": attr.bool(default",
"file_map = { layer_file_path(ctx, f): f for f in unavailable",
"and using basename # we should use a file_map based",
"jar file files += list(f.files) return files load( \"//lang:image.bzl\", \"dep_layer_impl\",",
"an exploded WAR. TODO(mattmoor): For `bazel run` of this to",
"unavailable + [classpath_file] } return _container.image.implementation( ctx, # We use",
"base or DEFAULT_JETTY_BASE for index, dep in enumerate(layers): this_name =",
"dependency layers. \"base\": attr.label(mandatory = True), # The main class",
"ctx.attr.deps + ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable",
"the classpath out # to a file instead. classpath_file =",
"excludes: container_pull( name = \"jetty_debug_image_base\", registry = \"gcr.io\", repository =",
"for which we are synthesizing an image. \"binary\": attr.label(mandatory =",
"\"_classpath_as_file\": attr.bool(default = False), # Override the defaults. \"directory\": attr.string(default",
"= []), # Whether to lay out each dependency in",
"no srcs # kwarg. deps = (deps + layers) or",
"] + ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args file_map = {",
"the dependency layers. \"base\": attr.label(mandatory = True), \"entrypoint\": attr.string_list(default =",
"of the dependencies. \"deps\": attr.label_list(), \"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(), #",
"License. \"\"\"A rule for creating a Java container image. The",
"**kwargs: See java_library. \"\"\" library_name = name + \".library\" native.java_library(name",
"layers # factored into our base. \"jar_layers\": attr.label_list(), # The",
"image on which to overlay the dependency layers. \"base\": attr.label(mandatory",
"main_class, # If the rule is turning a JAR built",
"ctx.attr.args file_map = { layer_file_path(ctx, f): f for f in",
"use a file_map based scheme. return _container.image.implementation( ctx, files =",
") _war_dep_layer = rule( attrs = dict(_container.image.attrs.items() + { #",
"use this file except in compliance with the License. #",
"to invoke on startup. \"main_class\": attr.string(mandatory = True), # Whether",
"reserved. # # Licensed under the Apache License, Version 2.0",
"for index, dep in enumerate(layers): this_name = \"%s.%d\" % (name,",
"= [], layers = [], **kwargs): \"\"\"Builds a container image",
"container_pull( name = \"java_debug_image_base\", registry = \"gcr.io\", repository = \"distroless/java\",",
") DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\",",
"layer for a single dependency's runfiles.\"\"\" # TODO(mattmoor): Today we",
"\"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\" not in excludes:",
"this by always writing the classpath out # to a",
"compute the set of unavailable stuff by walking deps #",
"and there is a limit on the length of a",
"base = base or DEFAULT_JETTY_BASE for index, dep in enumerate(layers):",
"main_class = main_class, # If the rule is turning a",
"name = \"jetty_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest",
"= [], runtime_deps = [], layers = [], jvm_flags =",
"# This is idempotent if folks call it themselves. _repositories()",
"rule( attrs = dict(_container.image.attrs.items() + { # The library target",
"library_name, deps = deps + layers, **kwargs) base = base",
"defaults. \"directory\": attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default =",
"by always writing the classpath out # to a file",
"runfiles.\"\"\" # TODO(mattmoor): Today we run the risk of filenames",
"in enumerate(layers): this_name = \"%s.%d\" % (name, index) jar_dep_layer(name =",
"\"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\":",
"in available] classpath = \":\".join([ layer_file_path(ctx, x) for x in",
"executable = True, outputs = _container.image.outputs, implementation = _jar_dep_layer_impl, )",
"= \"DIGESTS\", ) def repositories(): # Call the core \"repositories\"",
"_jar_dep_layer_impl(ctx): \"\"\"Appends a layer for a single dependency's runfiles.\"\"\" return",
"in compliance with the License. # You may obtain a",
"image. \"binary\": attr.label(mandatory = True), # The full list of",
"software # distributed under the License is distributed on an",
"dependency's runfiles.\"\"\" # TODO(mattmoor): Today we run the risk of",
"ctx.attr.jvm_flags + [ctx.attr.main_class] + ctx.attr.args file_map = { layer_file_path(ctx, f):",
"in ctx.attr.jar_layers: available += java_files(jar) # This is based on",
"= \"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"], ) if",
"for jar in ctx.attr.deps + ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable",
"= rule( attrs = dict(_container.image.attrs.items() + { # The base",
"Override the defaults. \"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT",
"\"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE =",
"The signature of java_image is compatible with java_binary. The signature",
"+ classpath_path if ctx.attr._classpath_as_file else classpath, ] + ctx.attr.jvm_flags +",
"layer_file_path(ctx, f): f for f in unavailable + [classpath_file] }",
"file_map based scheme. return _container.image.implementation( ctx, files = java_files(ctx.attr.dep), )",
"depset() for jar in ctx.attr.deps + ctx.attr.runtime_deps: unavailable += java_files(jar)",
"= depset() transitive_deps += java_files(ctx.attr.library) # TODO(mattmoor): Handle data files.",
"+ ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable += java_files(ctx.attr.binary) unavailable =",
"under the License. \"\"\"A rule for creating a Java container",
"image. \"library\": attr.label(mandatory = True), # The full list of",
"remaining runfiles.\"\"\" available = depset() for jar in ctx.attr.jar_layers: available",
"is participating. This can increase # sharing of the dependency's",
"based on rules_appengine's WAR rules. transitive_deps = depset() transitive_deps +=",
"we start putting libs in servlet-agnostic paths, # then consider",
"+ unavailable ]) # Classpaths can grow long and there",
"with the License. # You may obtain a copy of",
"\"@java_image_base//image\", }) DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\":",
"f in unavailable + [classpath_file] } return _container.image.implementation( ctx, #",
"= main_class, jvm_flags = jvm_flags, deps = deps, runtime_deps =",
"\"%s.%d\" % (name, index) jar_dep_layer(name = this_name, base = base,",
"increase # sharing of the dependency's layer across images, but",
"library_name = name + \".library\" native.java_library(name = library_name, deps =",
"permissions and # limitations under the License. \"\"\"A rule for",
"native.java_binary( name = binary_name, main_class = main_class, # If the",
"express or implied. # See the License for the specific",
"file_map, entrypoint = entrypoint, ) jar_app_layer = rule( attrs =",
"except in compliance with the License. # You may obtain",
"files) _war_app_layer = rule( attrs = dict(_container.image.attrs.items() + { #",
"then it will appear in runtime_deps. We are # not",
"= \"java_image_base\", registry = \"gcr.io\", repository = \"distroless/java\", digest =",
"java_library. \"\"\" library_name = name + \".library\" native.java_library(name = library_name,",
"deps # in the same way, adding in our binary",
"Today we run the risk of filenames colliding when #",
"# The binary target for which we are synthesizing an",
"deps + layers, **kwargs) base = base or DEFAULT_JETTY_BASE for",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"the defaults. \"directory\": attr.string(default = \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT PATHS",
"For `bazel run` of this to be useful, we need",
"attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default = False), }.items()), executable =",
"This is idempotent if folks call it themselves. _repositories() excludes",
"jar_dep_layer(name = this_name, base = base, dep = dep) base",
"# a binary, then it will appear in runtime_deps. We",
"\"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", }) load( \"//container:container.bzl\",",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"= [], jvm_flags = [], **kwargs): \"\"\"Builds a container image",
"{ # The library target for which we are synthesizing",
"using basename # we should use a file_map based scheme.",
"a manner that is agnostic # of the binary in",
"directory = \"/\", file_map = file_map, entrypoint = entrypoint, )",
"base, library = library_name, jar_layers = layers, visibility = visibility,",
"subtracting # out what it available. unavailable = depset() for",
"compatible with java_library. \"\"\" load( \"//container:container.bzl\", \"container_pull\", _repositories = \"repositories\",",
"classpath_path = layer_file_path(ctx, classpath_file) entrypoint = [ \"/usr/bin/java\", \"-cp\", #",
"Instead of just flattening and using basename # we should",
"filenames colliding when # they get flattened. Instead of just",
"layers. **kwargs: See java_binary. \"\"\" binary_name = name + \".binary\"",
"jar_dep_layer = rule( attrs = dict(_container.image.attrs.items() + { # The",
"limitations under the License. \"\"\"A rule for creating a Java",
"for creating a Java container image. The signature of java_image",
"java_library. \"\"\" load( \"//container:container.bzl\", \"container_pull\", _repositories = \"repositories\", ) #",
"\"directory\": attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"),",
"symlinks here. files = [d for d in transitive_deps if",
"layers. \"base\": attr.label(mandatory = True), # The dependency whose runfiles",
"layers. **kwargs: See java_library. \"\"\" library_name = name + \".library\"",
"we are synthesizing an image. \"library\": attr.label(mandatory = True), #",
"}.items()), executable = True, outputs = _container.image.outputs, implementation = _jar_app_layer_impl,",
"attr.label(mandatory = True), # The dependency whose runfiles we're appending.",
"which we are synthesizing an image. \"library\": attr.label(mandatory = True),",
"unavailable += java_files(ctx.attr.binary) unavailable = [x for x in unavailable",
"the app layers. \"agnostic_dep_layout\": attr.bool(default = True), # Whether the",
"to be useful, we need to be able to ctrl-C",
"java_binary. The signature of war_image is compatible with java_library. \"\"\"",
"# Classpaths can grow long and there is a limit",
"= _container.image.outputs, implementation = _war_dep_layer_impl, ) def _war_app_layer_impl(ctx): \"\"\"Appends the",
"dict(_container.image.attrs.items() + { # The library target for which we",
"def java_files(f): files = [] if java_common.provider in f: java_provider",
"\"\"\" binary_name = name + \".binary\" native.java_binary( name = binary_name,",
"\"DIGESTS\", ) def repositories(): # Call the core \"repositories\" function",
"available += java_files(jar) # This is based on rules_appengine's WAR",
"be useful, we need to be able to ctrl-C it",
"agnostic # of the binary in which it is participating.",
"layers. \"agnostic_dep_layout\": attr.bool(default = True), # Override the defaults. \"directory\":",
"mitigate this by always writing the classpath out # to",
"= \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default",
"participating. This can increase # sharing of the dependency's layer",
"\"\"\"Builds a container image overlaying the java_library as an exploded",
"# https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default = \".\"), }.items()), executable = True,",
"= select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\",",
"themselves. _repositories() excludes = native.existing_rules().keys() if \"java_image_base\" not in excludes:",
"of this to be useful, we need to be able",
"= True), \"entrypoint\": attr.string_list(default = []), # Whether to lay",
"folks call it themselves. _repositories() excludes = native.existing_rules().keys() if \"java_image_base\"",
"# Support optionally passing the classpath as a file. \"@\"",
"the java_binary. Args: layers: Augments \"deps\" with dependencies that should",
"the binary in which it is participating. This can increase",
"container actually terminate. More information: https://github.com/bazelbuild/bazel/issues/3519 Args: layers: Augments \"deps\"",
"= \"%s.%d\" % (name, index) _war_dep_layer(name = this_name, base =",
"= \"jetty_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\", digest =",
"allowed to pass deps (even []) if there is no",
"putting libs in servlet-agnostic paths, # then consider adding symlinks",
"\"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", }) load( \"//container:container.bzl\", _container = \"container\",",
"if \"java_debug_image_base\" not in excludes: container_pull( name = \"java_debug_image_base\", registry",
"= False), }.items()), executable = True, outputs = _container.image.outputs, implementation",
"this_name = \"%s.%d\" % (name, index) jar_dep_layer(name = this_name, base",
"\"//container:container.bzl\", \"container_pull\", _repositories = \"repositories\", ) # Load the resolved",
"d not in available] return _container.image.implementation(ctx, files = files) _war_app_layer",
"= entrypoint, ) jar_app_layer = rule( attrs = dict(_container.image.attrs.items() +",
"return files load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", ) def _jar_dep_layer_impl(ctx): \"\"\"Appends",
"\".binary\" native.java_binary( name = binary_name, main_class = main_class, # If",
"for which we are synthesizing an image. \"library\": attr.label(mandatory =",
"it themselves. _repositories() excludes = native.existing_rules().keys() if \"java_image_base\" not in",
"\"base\": attr.label(mandatory = True), # The main class to invoke",
"= \"distroless/java/jetty\", digest = _JETTY_DIGESTS[\"latest\"], ) if \"jetty_debug_image_base\" not in",
"= True), # The full list of dependencies that have",
"is no srcs # kwarg. deps = (deps + layers)",
"True, outputs = _container.image.outputs, implementation = _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx):",
"= False), # Override the defaults. \"directory\": attr.string(default = \"/app\"),",
"should be put into their own layers. **kwargs: See java_binary.",
"java_provider = f[java_common.provider] files += list(java_provider.transitive_runtime_jars) if hasattr(f, \"files\"): #",
"all absolute paths. directory = \"/\", file_map = file_map, entrypoint",
"java_files(f): files = [] if java_common.provider in f: java_provider =",
"a jar file files += list(f.files) return files load( \"//lang:image.bzl\",",
"Whether to lay out each dependency in a manner that",
"attr.label(mandatory = True), \"entrypoint\": attr.string_list(default = []), # Whether to",
"of a # command line, so mitigate this by always",
"= _container.image.outputs, implementation = _jar_app_layer_impl, ) def java_image( name, base",
"ctx, files = java_files(ctx.attr.dep), ) _war_dep_layer = rule( attrs =",
"def _jar_app_layer_impl(ctx): \"\"\"Appends the app layer with all remaining runfiles.\"\"\"",
"\"runtime_deps\": attr.label_list(), \"jvm_flags\": attr.string_list(), # The base image on which",
"list of dependencies that have their own layers # factored",
"Java container image. The signature of java_image is compatible with",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"dependencies that have their own layers # factored into our",
"= \"/jetty/webapps/ROOT/WEB-INF/lib\"), # WE WANT PATHS FLATTENED # \"data_path\": attr.string(default",
"digest = _JETTY_DIGESTS[\"debug\"], ) if \"servlet_api\" not in excludes: native.maven_jar(",
"\"@jetty_image_base//image\", }) load( \"//container:container.bzl\", _container = \"container\", ) def java_files(f):",
"= True), # Override the defaults. \"directory\": attr.string(default = \"/app\"),",
"implementation = _war_dep_layer_impl, ) def _war_app_layer_impl(ctx): \"\"\"Appends the app layer",
"layers = [], **kwargs): \"\"\"Builds a container image overlaying the",
"on which to overlay the dependency layers. \"base\": attr.label(mandatory =",
"grow long and there is a limit on the length",
"runtime_deps. We are # not allowed to pass deps (even",
"with java_binary. The signature of war_image is compatible with java_library.",
"an image. \"library\": attr.label(mandatory = True), # The full list",
"files += list(f.files) return files load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", )",
"with java_library. \"\"\" load( \"//container:container.bzl\", \"container_pull\", _repositories = \"repositories\", )",
"Version 2.0 (the \"License\"); # you may not use this",
"x not in available] classpath = \":\".join([ layer_file_path(ctx, x) for",
") def _jar_app_layer_impl(ctx): \"\"\"Appends the app layer with all remaining",
"x in available + unavailable ]) # Classpaths can grow",
"return _container.image.implementation(ctx, files = files) _war_app_layer = rule( attrs =",
"startup. \"main_class\": attr.string(mandatory = True), # Whether to lay out",
"dep) base = this_name visibility = kwargs.get(\"visibility\", None) tags =",
"in ctx.attr.deps + ctx.attr.runtime_deps: unavailable += java_files(jar) unavailable += java_files(ctx.attr.binary)",
"We are # not allowed to pass deps (even [])",
"= [x for x in unavailable if x not in",
"runtime_deps, jvm_flags = jvm_flags, **kwargs ) base = base or",
"binary, then it will appear in runtime_deps. We are #",
"PATHS FLATTENED # \"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default =",
"by applicable law or agreed to in writing, software #",
"# Copyright 2017 Google Inc. All rights reserved. # #",
"files load( \"//lang:image.bzl\", \"dep_layer_impl\", \"layer_file_path\", ) def _jar_dep_layer_impl(ctx): \"\"\"Appends a",
"classpath_path if ctx.attr._classpath_as_file else classpath, ] + ctx.attr.jvm_flags + [ctx.attr.main_class]",
"# Call the core \"repositories\" function to reduce boilerplate. #",
"{ # The base image on which to overlay the",
"\"base\": attr.label(mandatory = True), \"entrypoint\": attr.string_list(default = []), # Whether",
"files = [d for d in transitive_deps if d not",
"be put into their own layers. **kwargs: See java_binary. \"\"\"",
"# Whether the classpath should be passed as a file.",
"\"\"\"Builds a container image overlaying the java_binary. Args: layers: Augments",
"None) jar_app_layer( name = name, base = base, binary =",
"+= java_files(ctx.attr.library) # TODO(mattmoor): Handle data files. # If we",
"a layer for a single dependency's runfiles.\"\"\" return dep_layer_impl(ctx, runfiles",
"[], runtime_deps = [], layers = [], jvm_flags = [],",
"resolved digests. load( \":java.bzl\", _JAVA_DIGESTS = \"DIGESTS\", ) load( \":jetty.bzl\",",
"[classpath_file] } return _container.image.implementation( ctx, # We use all absolute",
"\"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\" not in excludes:",
"into # a binary, then it will appear in runtime_deps.",
"ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file) entrypoint = [ \"/usr/bin/java\", \"-cp\",",
"dependency whose runfiles we're appending. \"dep\": attr.label(mandatory = True), #",
"True), # The main class to invoke on startup. \"main_class\":",
"this_name visibility = kwargs.get(\"visibility\", None) tags = kwargs.get(\"tags\", None) _war_app_layer(",
"None, deps = [], layers = [], **kwargs): \"\"\"Builds a",
"here. files = [d for d in transitive_deps if d",
"pass deps (even []) if there is no srcs #",
"+ { # The library target for which we are",
"are # not allowed to pass deps (even []) if",
"base = base, library = library_name, jar_layers = layers, visibility",
"**kwargs): \"\"\"Builds a container image overlaying the java_binary. Args: layers:",
"applicable law or agreed to in writing, software # distributed",
"\"DIGESTS\", ) load( \":jetty.bzl\", _JETTY_DIGESTS = \"DIGESTS\", ) def repositories():",
"\"entrypoint\": attr.string_list(default = []), # Whether to lay out each",
"Inc. All rights reserved. # # Licensed under the Apache",
"= layer_file_path(ctx, ctx.files.binary[0]) classpath_path = layer_file_path(ctx, classpath_file) entrypoint = [",
"Args: layers: Augments \"deps\" with dependencies that should be put",
"governing permissions and # limitations under the License. \"\"\"A rule",
"= \"/\", file_map = file_map, entrypoint = entrypoint, ) jar_app_layer",
"True, outputs = _container.image.outputs, implementation = _war_app_layer_impl, ) def war_image(name,",
"a JAR built with java_library into # a binary, then",
"will appear in runtime_deps. We are # not allowed to",
"ctx.new_file(ctx.attr.name + \".classpath\") ctx.actions.write(classpath_file, classpath) binary_path = layer_file_path(ctx, ctx.files.binary[0]) classpath_path",
"\"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE = select({ \"@io_bazel_rules_docker//:fastbuild\": \"@jetty_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@jetty_debug_image_base//image\",",
"_container.image.outputs, implementation = _jar_dep_layer_impl, ) def _jar_app_layer_impl(ctx): \"\"\"Appends the app",
"deps = [], layers = [], **kwargs): \"\"\"Builds a container",
"\"java_image_base\", registry = \"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"latest\"],",
"dependency layers. \"base\": attr.label(mandatory = True), \"entrypoint\": attr.string_list(default = []),",
"\"@jetty_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@jetty_image_base//image\", \"//conditions:default\": \"@jetty_image_base//image\", }) load( \"//container:container.bzl\", _container =",
"# You may obtain a copy of the License at",
"None, main_class = None, deps = [], runtime_deps = [],",
"kwargs.get(\"tags\", None) _war_app_layer( name = name, base = base, library",
"out # to a file instead. classpath_file = ctx.new_file(ctx.attr.name +",
"\"@io_bazel_rules_docker//:fastbuild\": \"@java_image_base//image\", \"@io_bazel_rules_docker//:debug\": \"@java_debug_image_base//image\", \"@io_bazel_rules_docker//:optimized\": \"@java_image_base//image\", \"//conditions:default\": \"@java_image_base//image\", }) DEFAULT_JETTY_BASE",
"See java_library. \"\"\" library_name = name + \".library\" native.java_library(name =",
"attr.label(mandatory = True), # The main class to invoke on",
"# The dependency whose runfiles we're appending. \"dep\": attr.label(mandatory =",
"which we are synthesizing an image. \"binary\": attr.label(mandatory = True),",
"own layers. **kwargs: See java_library. \"\"\" library_name = name +",
"jar_layers = layers, visibility = visibility, args = kwargs.get(\"args\"), )",
"attr.bool(default = True), # Override the defaults. \"directory\": attr.string(default =",
"if \"java_image_base\" not in excludes: container_pull( name = \"java_image_base\", registry",
"# If the rule is turning a JAR built with",
"\"gcr.io\", repository = \"distroless/java\", digest = _JAVA_DIGESTS[\"debug\"], ) if \"jetty_image_base\"",
"command line, so mitigate this by always writing the classpath",
"unavailable stuff by walking deps # in the same way,",
"attr.string(default = \".\"), }.items()), executable = True, outputs = _container.image.outputs,",
"if d not in available] return _container.image.implementation(ctx, files = files)",
"attr.string_list(default = []), # Whether to lay out each dependency",
"base or DEFAULT_JAVA_BASE for index, dep in enumerate(layers): this_name =",
"container_pull( name = \"jetty_image_base\", registry = \"gcr.io\", repository = \"distroless/java/jetty\",",
"should be put into their own layers. **kwargs: See java_library.",
"the app layers. \"agnostic_dep_layout\": attr.bool(default = True), # Override the",
"= base, binary = binary_name, main_class = main_class, jvm_flags =",
"the defaults. \"directory\": attr.string(default = \"/app\"), # https://github.com/bazelbuild/bazel/issues/2176 \"data_path\": attr.string(default",
"the set of unavailable stuff by walking deps # in",
"java_image( name, base = None, main_class = None, deps =",
"digest = _JAVA_DIGESTS[\"latest\"], ) if \"java_debug_image_base\" not in excludes: container_pull(",
"(deps + layers) or None, runtime_deps = runtime_deps, jvm_flags =",
"be passed as a file. \"_classpath_as_file\": attr.bool(default = False), #",
"\"data_path\": attr.string(default = \".\"), \"legacy_run_behavior\": attr.bool(default = False), }.items()), executable",
"None) _war_app_layer( name = name, base = base, library =",
"to be able to ctrl-C it and have the container",
"This is based on rules_appengine's WAR rules. transitive_deps = depset()",
"classpath out # to a file instead. classpath_file = ctx.new_file(ctx.attr.name",
"into our base. \"jar_layers\": attr.label_list(), # The rest of the",
"dependency's runfiles.\"\"\" return dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer = rule(",
"can increase # sharing of the dependency's layer across images,",
"for a single dependency's runfiles.\"\"\" # TODO(mattmoor): Today we run",
"\"License\"); # you may not use this file except in",
"dependency's layer across images, but requires a # symlink forest",
"\"\"\"Appends a layer for a single dependency's runfiles.\"\"\" return dep_layer_impl(ctx,",
"attr.label_list(), # The base image on which to overlay the",
"# Whether to lay out each dependency in a manner",
"in excludes: container_pull( name = \"jetty_debug_image_base\", registry = \"gcr.io\", repository",
"True, outputs = _container.image.outputs, implementation = _jar_app_layer_impl, ) def java_image(",
"native.existing_rules().keys() if \"java_image_base\" not in excludes: container_pull( name = \"java_image_base\",",
"% (name, index) _war_dep_layer(name = this_name, base = base, dep",
"java_binary. \"\"\" binary_name = name + \".binary\" native.java_binary( name =",
"a single dependency's runfiles.\"\"\" return dep_layer_impl(ctx, runfiles = java_files) jar_dep_layer",
"attrs = dict(_container.image.attrs.items() + { # The base image on",
"excludes: native.maven_jar( name = \"javax_servlet_api\", artifact = \"javax.servlet:javax.servlet-api:3.0.1\", ) DEFAULT_JAVA_BASE"
] |
[
"10.0.') value = _compile._astype_scalar(value, ctype, 'same_kind', env) value = Data.init(value,",
"loop unrolling support. Args: start (int): Same as that of",
"env) if start.ctype.dtype.kind not in 'iu': raise TypeError('range supports only",
"<= mask.obj <= 0xffffffff): raise ValueError('mask is out of range')",
"self._op = op self._name = 'atomic' + op self._dtypes =",
"= Data.init(value, env) if op == 'CAS': assert value2 is",
"'numpy': ctype = _cuda_types.Scalar(int) elif env.mode == 'cuda': ctype =",
"def call(self, env, mask, var, val_id, *, width=None): name =",
"start = Data.init(start, env) step = Data.init(step, env) if start.ctype.dtype.kind",
"= AtomicOp( 'Min', ('int32', 'uint32', 'uint64')) atomic_max = AtomicOp( 'Max',",
"are not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.maximum, (a,",
"must be of array type.') target = _compile._indexing(array, index, env)",
"Constant): if width.obj not in (2, 4, 8, 16, 32):",
"TypeError( f'min() expects at least 2 arguments, got {len(args)}') if",
"assert value2 is not None # On HIP, 'H' is",
"value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env) value2 = Data.init(value2, env)",
"hip/hcc_detail/device_functions.h preamble += \"\"\" return __lane_id(); } \"\"\" return preamble",
"= SyncWarp() shared_memory = SharedMemory() grid = GridFunc('grid') gridsize =",
"_compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id = Data.init(val_id, env) if width:",
"with extern specifier. alignment (int or None): Enforce the alignment",
"Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self, op, dtypes): self._op =",
"', '.join(self._code.format(n=n) for n in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return",
"over. index: A valid index such that the address to",
"RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None): \"\"\"Range with loop unrolling support.",
"else: raise ValueError('Only ndim=1,2,3 are supported') elts_code = ', '.join(self._code.format(n=n)",
"represent the value to swap to. .. seealso:: `Numba's corresponding",
"import _compile from functools import reduce class RangeFunc(BuiltinFunc): def __call__(self,",
"value = _compile._astype_scalar(value, ctype, 'same_kind', env) value = Data.init(value, env)",
"2') else: width = Constant(64) if runtime.is_hip else Constant(32) width",
"ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op",
"ret; } \"\"\" else: # defined in hip/hcc_detail/device_functions.h preamble +=",
"Please refer to `Warp Shuffle Functions`_ for detailed explanation. ..",
"is not supported and we will never reach here if",
"LenFunc(), min: MinFunc(), max: MaxFunc(), } range_ = RangeFunc() syncthreads",
"of unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def",
"unroll. - If `None` (default), leave the control of loop",
"in ``[0, jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`, this is a",
"else: code = '__syncwarp()' return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def",
"env) mask = Data.init(mask, env) code = f'__syncwarp({mask.code})' else: code",
"'uint64')) atomic_xor = AtomicOp( 'Xor', ('int32', 'uint32', 'uint64')) # warp-shuffle",
"supported and we will never reach here if (op ==",
"env) code = f'{name}({hex(mask)}, {var.code}, {val_id.code}' code += f', {width.code})'",
"before the loop. - If `False`, add ``#pragma unroll(1)`` directive",
"step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def call(self, env, *args, **kwds): if",
"= 'Compute the thread index in the grid.' self._eq =",
"f'__syncwarp({mask.code})' else: code = '__syncwarp()' return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc):",
"env) start = Data.init(start, env) step = Data.init(step, env) if",
"2: raise TypeError( f'min() expects at least 2 arguments, got",
"env) if op == 'CAS': assert value2 is not None",
"of range') mask = _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env) mask",
"an `int`, add ``#pragma unroll(n)`` directive before the loop, where",
"Data.init(var, env) ctype = var.ctype if ctype.dtype.name not in self._dtypes:",
"builtin_functions_dict = { range: RangeFunc(), len: LenFunc(), min: MinFunc(), max:",
"**kwds): if len(args) != 1: raise TypeError(f'len() expects only 1",
"env, *args, unroll=None): if len(args) == 0: raise TypeError('range expected",
"env.get_fresh_variable_name(prefix='_smem') # retry var = Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name]",
"not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports only array type') if",
"with loop unrolling support. Args: start (int): Same as that",
"if isinstance(step, Constant): step_is_positive = step.obj >= 0 elif step.ctype.dtype.kind",
"== 'numpy': ctype = _cuda_types.Scalar(int) elif env.mode == 'cuda': ctype",
"self._name = '__shfl_' + (op + '_' if op else",
"\"\"\" unsigned int ret; asm (\"mov.u32 %0, %%laneid;\" : \"=r\"(ret)",
"def __call__(self): \"\"\"Returns the lane ID of the calling thread,",
"width = _compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env) width = Data.init(width,",
"HIP, 'e' is not supported and we will never reach",
"not None # On HIP, 'H' is not supported and",
"where the integer ``n`` means the number of iterations to",
"not supported before ' 'CUDA 10.1') if int(device.get_compute_capability()) < 70:",
"= self._op array = Data.init(array, env) if not isinstance(array.ctype, (_cuda_types.CArray,",
"LenFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) != 1:",
"if not isinstance(ndim, int): raise TypeError('ndim must be an integer')",
"if ndim == 1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim ==",
"Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes): self._op =",
"and runtime.runtimeGetVersion() < 10000): raise RuntimeError( 'float16 atomic operation is",
"mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args: mask (int): Active threads in a",
"(\"mov.u32 %0, %%laneid;\" : \"=r\"(ret) ); return ret; } \"\"\"",
"= GridFunc('grid') gridsize = GridFunc('gridsize') laneid = LaneID() # atomic",
"'And', ('int32', 'uint32', 'uint64')) atomic_or = AtomicOp( 'Or', ('int32', 'uint32',",
"self._name op = self._op array = Data.init(array, env) if not",
"= Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] = var env.locals[name] =",
"_cuda_types.Ptr)): raise TypeError('The first argument must be of array type.')",
"= 'atomic' + op self._dtypes = dtypes doc = f\"\"\"Calls",
"that the address to the corresponding array element ``array[index]`` can",
"a tuple if ndim == 1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif",
"ValueError('width needs to be power of 2') else: width =",
"A valid index such that the address to the corresponding",
"to use for the specified operation. For the case of",
"static shared memory. If ``None``, declares the shared memory with",
"1-D array. Args: dtype (dtype): The dtype of the returned",
"* blockDim.{n}' elif mode == 'gridsize': self._desc = 'Compute the",
"directive before the loop. - If `False`, add ``#pragma unroll(1)``",
"https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ =",
"integer type.') if step.ctype.dtype.kind not in 'iu': raise TypeError('range supports",
"the ``{self._name}`` function. Please refer to `Warp Shuffle Functions`_ for",
"and we will never reach here if ctype.dtype.char == 'H':",
"'__syncwarp()' return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self, dtype, size,",
"atomic_max = AtomicOp( 'Max', ('int32', 'uint32', 'uint64')) atomic_inc = AtomicOp(",
"super().__call__() def call(self, env, *args, unroll=None): if len(args) == 0:",
"var, val_id, *, width=None): name = self._name var = Data.init(var,",
"'loop unrolling requires constant start, stop, step and ' 'unroll",
"__forceinline__ unsigned int LaneId() {' if not runtime.is_hip: # see",
"atomic_add = AtomicOp( 'Add', ('int32', 'uint32', 'uint64', 'float32', 'float64') +",
"max: MaxFunc(), } range_ = RangeFunc() syncthreads = SyncThreads() syncwarp",
"the other two integers the ``y`` and ``z`` attributes are",
"GridFunc(BuiltinFunc): def __init__(self, mode): if mode == 'grid': self._desc =",
"arguments are not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.minimum,",
"def call_const(self, env): return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self,",
"index over. index: A valid index such that the address",
"the grid.' self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x' self._link",
"array: A :class:`cupy.ndarray` to index over. index: A valid index",
"== 2: dims = ('x', 'y') elif ndim == 3:",
"got {len(args)}') if kwds: raise TypeError('keyword arguments are not supported')",
"import device from cupyx.jit import _cuda_types from cupyx.jit._internal_types import BuiltinFunc",
"'jit.blockDim.x * jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code = 'blockDim.{n} *",
"to be of type int, ' f'got {type(unroll).__name__}') if unroll",
"got {len(args)}') if unroll is not None: if not all(isinstance(x,",
"grid = GridFunc('grid') gridsize = GridFunc('gridsize') laneid = LaneID() #",
"input.') try: mask = mask.obj except Exception: raise TypeError('mask must",
"True else: step_is_positive = None stop = Data.init(stop, env) start",
"integer is returned, otherwise a tuple. .. note:: This function",
"env, *, mask=None): if runtime.is_hip: if mask is not None:",
"call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = { range:",
"raise RuntimeError( 'uint16 atomic operation is not supported before '",
"= 'blockDim.{n} * gridDim.{n}' else: raise ValueError('unsupported function') doc =",
"mask <= 0xffffffff): raise ValueError('mask is out of range') #",
"ctype.dtype.char == 'e' and runtime.runtimeGetVersion() < 10000): raise RuntimeError( 'float16",
"cupyx.jit import _compile from functools import reduce class RangeFunc(BuiltinFunc): def",
"_Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ = doc def __call__(self,",
"%0, %%laneid;\" : \"=r\"(ret) ); return ret; } \"\"\" else:",
"call(self, env, *args, **kwds): if len(args) < 2: raise TypeError(",
"atomic operation is not supported before ' 'sm_70') value2 =",
"'cuda': ctype = stop.ctype else: assert False return Range(start, stop,",
"preamble def call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict =",
"{len(args)}') if kwds: raise TypeError('keyword arguments are not supported') arg",
"('up', 'down'): val_id_t = _cuda_types.uint32 else: val_id_t = _cuda_types.int32 val_id",
"dtype, size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while",
"step.ctype.dtype.kind == 'u': step_is_positive = True else: step_is_positive = None",
"= Constant(0), args[0], Constant(1) elif len(args) == 2: start, stop,",
"'H': if runtime.runtimeGetVersion() < 10010: raise RuntimeError( 'uint16 atomic operation",
"the alignment via __align__(N). \"\"\" super().__call__() def call_const(self, env, dtype,",
"__call__(self, ndim): super().__call__() def call_const(self, env, ndim): if not isinstance(ndim,",
"return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim == 2: dims = ('x',",
"'uint32', 'uint64')) atomic_inc = AtomicOp( 'Inc', ('uint32',)) atomic_dec = AtomicOp(",
"import _cuda_types from cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types import Data",
"1D we return a single variable, # otherwise a tuple",
"!= 1: raise TypeError(f'len() expects only 1 argument, got {len(args)}')",
":obj:`range`. unroll (int or bool or None): - If `True`,",
"# defined in hip/hcc_detail/device_functions.h preamble += \"\"\" return __lane_id(); }",
"or greater than INT_MAX') if isinstance(step, Constant): step_is_positive = step.obj",
"val_id, *, width=32): super().__call__() def call(self, env, mask, var, val_id,",
"isinstance(unroll, bool)): raise TypeError( 'unroll value expected to be of",
"atomic_and = AtomicOp( 'And', ('int32', 'uint32', 'uint64')) atomic_or = AtomicOp(",
"function instead of a property. \"\"\" super().__call__() def _get_preamble(self): preamble",
"reduce class RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None): \"\"\"Range with loop",
"(dtype): The dtype of the returned array. size (int or",
"ctype, 'same_kind', env) value2 = Data.init(value2, env) code = f'{name}(&{target.code},",
"array, index, value, value2=None): name = self._name op = self._op",
"if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does not support",
"<= mask <= 0xffffffff): raise ValueError('mask is out of range')",
"specifier. alignment (int or None): Enforce the alignment via __align__(N).",
"Range(start, stop, step, ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def call(self,",
"unroll value is ' 'non-positive or greater than INT_MAX') if",
"(op + '_' if op else '') + 'sync' self._dtypes",
"shfl, and # \"laneMask\" for shfl_xor if self._op in ('up',",
"args[1], Constant(1) elif len(args) == 3: start, stop, step =",
"is allowed. Returns: int or tuple: If ``ndim`` is 1,",
"be an integer') # Numba convention: for 1D we return",
"of built-in :obj:`range`. step (int): Same as that of built-in",
"if runtime.is_hip else ('float16',))) atomic_sub = AtomicOp( 'Sub', ('int32', 'uint32'))",
"mode): if mode == 'grid': self._desc = 'Compute the thread",
"'') + 'sync' self._dtypes = dtypes doc = f\"\"\"Calls the",
"runtime.runtimeGetVersion() < 10000): raise RuntimeError( 'float16 atomic operation is not",
"= ('x', 'y', 'z') else: raise ValueError('Only ndim=1,2,3 are supported')",
"shared memory and returns it as a 1-D array. Args:",
"function follows the convention of Numba's :func:`{self._link}`. \"\"\" self.__doc__ =",
"in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc):",
"https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ = doc def __call__(self, array, index, value,",
"_cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args)",
"size, alignment=None): \"\"\"Allocates shared memory and returns it as a",
"ctype) class GridFunc(BuiltinFunc): def __init__(self, mode): if mode == 'grid':",
"'.join(self._code.format(n=n) for n in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})',",
"Constant): step_is_positive = step.obj >= 0 elif step.ctype.dtype.kind == 'u':",
"{mask} is ignored on HIP', RuntimeWarning) elif not (0x0 <=",
"``array[index]`` can be computed. value: Represent the value to use",
"'int64', 'float32', 'float64') + (() if runtime.is_hip else ('uint64', 'float16')))",
"MinFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) < 2:",
"= '__device__ __forceinline__ unsigned int LaneId() {' if not runtime.is_hip:",
"= RangeFunc() syncthreads = SyncThreads() syncwarp = SyncWarp() shared_memory =",
"import Range from cupyx.jit import _compile from functools import reduce",
"unroll.obj if not (isinstance(unroll, int) or isinstance(unroll, bool)): raise TypeError(",
"array. size (int or None): If ``int`` type, the size",
"is not None: name = env.get_fresh_variable_name(prefix='_smem') # retry var =",
"MaxFunc(), } range_ = RangeFunc() syncthreads = SyncThreads() syncwarp =",
"not None: name = env.get_fresh_variable_name(prefix='_smem') # retry var = Data(name,",
"'uint64')) atomic_inc = AtomicOp( 'Inc', ('uint32',)) atomic_dec = AtomicOp( 'Dec',",
"def __init__(self, mode): if mode == 'grid': self._desc = 'Compute",
"_Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call_const(self, env): return Data('__syncthreads()',",
"__call__(self): \"\"\"Calls ``__syncthreads()``. .. seealso:: `Synchronization functions`_ .. _Synchronization functions:",
"doc def __call__(self, array, index, value, alt_value=None): super().__call__() def call(self,",
"self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.') # On",
"the grid size.' self._eq = 'jit.blockDim.x * jit.gridDim.x' self._link =",
"class WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op self._name",
"Data.init(step, env) if start.ctype.dtype.kind not in 'iu': raise TypeError('range supports",
"at least 2 arguments, got {len(args)}') if kwds: raise TypeError('keyword",
"Data.init(val_id, env) if width: if isinstance(width, Constant): if width.obj not",
":obj:`numba.cuda.laneid`, this is a callable function instead of a property.",
"unroll is not None: if not all(isinstance(x, Constant) for x",
"if op == 'CAS': assert value2 is not None #",
"('int32', 'uint32', 'uint64', 'float32')) atomic_min = AtomicOp( 'Min', ('int32', 'uint32',",
"input.') # On HIP, 'e' is not supported and we",
"target = _compile._indexing(array, index, env) ctype = target.ctype if ctype.dtype.name",
"value, alt_value=None): super().__call__() def call(self, env, array, index, value, value2=None):",
"\"\"\" return preamble def call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32)",
"an integer is returned, otherwise a tuple. .. note:: This",
"integer type.') if env.mode == 'numpy': ctype = _cuda_types.Scalar(int) elif",
"= Data.init(step, env) if start.ctype.dtype.kind not in 'iu': raise TypeError('range",
"On HIP, 'e' is not supported and we will never",
"functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call_const(self, env):",
"env.decls[name] = var env.locals[name] = var return Data(name, _cuda_types.Ptr(child_type)) class",
"SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args: mask (int):",
"'blockDim.{n} * gridDim.{n}' else: raise ValueError('unsupported function') doc = f\"\"\"",
"cupy.minimum, (a, b), None, env), args) class MaxFunc(BuiltinFunc): def call(self,",
"'Exch', ('int32', 'uint32', 'uint64', 'float32')) atomic_min = AtomicOp( 'Min', ('int32',",
"as that of built-in :obj:`range`. step (int): Same as that",
"= op self._name = '__shfl_' + (op + '_' if",
"warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) elif not (0x0",
"self._op = op self._name = '__shfl_' + (op + '_'",
"of loop unrolling to the compiler (no ``#pragma``). .. seealso::",
"== 1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim == 2: dims",
"GridFunc('grid') gridsize = GridFunc('gridsize') laneid = LaneID() # atomic functions",
"warp-shuffle functions _shfl_dtypes = ( ('int32', 'uint32', 'int64', 'float32', 'float64')",
"import BuiltinFunc from cupyx.jit._internal_types import Data from cupyx.jit._internal_types import Constant",
"call_const(self, env, ndim): if not isinstance(ndim, int): raise TypeError('ndim must",
"Functions`_ for detailed explanation. .. _Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\"",
"requires constant start, stop, step and ' 'unroll value') unroll",
"loop to disable unrolling. - If an `int`, add ``#pragma",
"code = f'{name}({hex(mask)}, {var.code}, {val_id.code}' code += f', {width.code})' return",
"operation. For the case of :obj:`atomic_cas`, this is the value",
"WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op self._name =",
"only for integer type.') if stop.ctype.dtype.kind not in 'iu': raise",
"out of range') mask = _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env)",
"_#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def call(self, env, *args, unroll=None):",
"= ('x', 'y') elif ndim == 3: dims = ('x',",
"blockIdx.{n} * blockDim.{n}' elif mode == 'gridsize': self._desc = 'Compute",
"return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self, op, dtypes): self._op",
"returned array. size (int or None): If ``int`` type, the",
"raise ValueError('unsupported function') doc = f\"\"\" {self._desc} Computation of the",
"functions atomic_add = AtomicOp( 'Add', ('int32', 'uint32', 'uint64', 'float32', 'float64')",
"= 1 if not (unroll is True or 0 <",
"None, env), args) class SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls ``__syncthreads()``. ..",
"`#pragma unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def call(self,",
"except Exception: raise TypeError('mask must be an integer') if runtime.is_hip:",
"stop, step, unroll)): raise TypeError( 'loop unrolling requires constant start,",
"{value.code})' return Data(code, ctype) class GridFunc(BuiltinFunc): def __init__(self, mode): if",
"_cuda_types.uint32) builtin_functions_dict = { range: RangeFunc(), len: LenFunc(), min: MinFunc(),",
"('uint32',)) atomic_dec = AtomicOp( 'Dec', ('uint32',)) atomic_cas = AtomicOp( 'CAS',",
"the shared memory with extern specifier. alignment (int or None):",
"warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) mask = None",
"down}, \"srcLane\" for shfl, and # \"laneMask\" for shfl_xor if",
"return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = { range: RangeFunc(), len: LenFunc(),",
"unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def call(self, env,",
"declares the shared memory with extern specifier. alignment (int or",
"to the compiler (no ``#pragma``). .. seealso:: `#pragma unroll`_ ..",
"'iu': raise TypeError('range supports only for integer type.') if stop.ctype.dtype.kind",
"arg.ctype.ndim: raise TypeError('len() of unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q'))",
"`True`, add ``#pragma unroll`` directive before the loop. - If",
"atomic operation is not supported before ' 'CUDA 10.1') if",
"+ (op + '_' if op else '') + 'sync'",
"as follows:: {self._eq} and for the other two integers the",
"range') mask = _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env) mask =",
"import reduce class RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None): \"\"\"Range with",
"``y`` and ``z`` attributes are used. Args: ndim (int): The",
"ValueError('mask is out of range') # val_id refers to \"delta\"",
"not (0x0 <= mask.obj <= 0xffffffff): raise ValueError('mask is out",
"'Dec', ('uint32',)) atomic_cas = AtomicOp( 'CAS', ('int32', 'uint32', 'uint64') +",
":obj:`range`. stop (int): Same as that of built-in :obj:`range`. step",
"= None stop = Data.init(stop, env) start = Data.init(start, env)",
"else Constant(32) width = _compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env) width",
"This function follows the convention of Numba's :func:`{self._link}`. \"\"\" self.__doc__",
"isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports only array type') if not",
"mask=None): if runtime.is_hip: if mask is not None: warnings.warn(f'mask {mask}",
"('int32', 'uint32', 'uint64')) atomic_inc = AtomicOp( 'Inc', ('uint32',)) atomic_dec =",
"if runtime.runtimeGetVersion() < 10010: raise RuntimeError( 'uint16 atomic operation is",
"not supported before CUDA 10.0.') value = _compile._astype_scalar(value, ctype, 'same_kind',",
"from cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types import Data from cupyx.jit._internal_types",
"start, stop, step = Constant(0), args[0], Constant(1) elif len(args) ==",
"unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self,",
"out of range') # val_id refers to \"delta\" for shfl_{up,",
"array = Data.init(array, env) if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise",
"does not support {ctype.dtype} input.') try: mask = mask.obj except",
"refers to \"delta\" for shfl_{up, down}, \"srcLane\" for shfl, and",
"of :obj:`atomic_cas`, this is the value for ``array[index]`` to compare",
"if stop.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for",
"of built-in :obj:`range`. unroll (int or bool or None): -",
"def call(self, env, *args, **kwds): if len(args) != 1: raise",
"**kwds): if len(args) < 2: raise TypeError( f'max() expects at",
"Represent the value to use for the specified operation. For",
"_cuda_types.uint32) elif ndim == 2: dims = ('x', 'y') elif",
"step (int): Same as that of built-in :obj:`range`. unroll (int",
"used in :obj:`atomic_cas` to represent the value to swap to.",
"< 1 << 31): warnings.warn( 'loop unrolling is ignored as",
"Constant(1) elif len(args) == 3: start, stop, step = args",
"integer') if runtime.is_hip: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)",
"'non-positive or greater than INT_MAX') if isinstance(step, Constant): step_is_positive =",
"{len(args)}') if unroll is not None: if not all(isinstance(x, Constant)",
"== 'cuda': ctype = stop.ctype else: assert False return Range(start,",
"= SyncThreads() syncwarp = SyncWarp() shared_memory = SharedMemory() grid =",
"not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.maximum, (a, b),",
"_shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes) shfl_xor_sync",
"RuntimeError( 'uint16 atomic operation is not supported before ' 'sm_70')",
"not support {ctype.dtype} input.') # On HIP, 'e' is not",
"def call(self, env, array, index, value, value2=None): name = self._name",
"TypeError('range supports only for integer type.') if env.mode == 'numpy':",
"property. \"\"\" super().__call__() def _get_preamble(self): preamble = '__device__ __forceinline__ unsigned",
"memory with extern specifier. alignment (int or None): Enforce the",
"'float32', 'float64') + (() if runtime.is_hip else ('float16',))) atomic_sub =",
"_cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id = Data.init(val_id,",
"'e' and runtime.runtimeGetVersion() < 10000): raise RuntimeError( 'float16 atomic operation",
"``#pragma``). .. seealso:: `#pragma unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\"",
"ranging in ``[0, jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`, this is",
"loop. - If `False`, add ``#pragma unroll(1)`` directive before the",
"else: val_id_t = _cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env)",
"and ' 'unroll value') unroll = unroll.obj if not (isinstance(unroll,",
"support {ctype.dtype} input.') try: mask = mask.obj except Exception: raise",
"valid index such that the address to the corresponding array",
"val_id = Data.init(val_id, env) if width: if isinstance(width, Constant): if",
"on HIP', RuntimeWarning) mask = None if mask: if isinstance(mask,",
"import cupy from cupy_backends.cuda.api import runtime from cupy.cuda import device",
"__call__(self, array, index, value, alt_value=None): super().__call__() def call(self, env, array,",
"raise RuntimeError( 'float16 atomic operation is not supported before CUDA",
"size (int or None): If ``int`` type, the size of",
"HIP', RuntimeWarning) mask = None if mask: if isinstance(mask, Constant):",
"**kwds): if len(args) < 2: raise TypeError( f'min() expects at",
"will never reach here if ctype.dtype.char == 'H': if runtime.runtimeGetVersion()",
"index, value, alt_value=None): super().__call__() def call(self, env, array, index, value,",
"16, 32): raise ValueError('width needs to be power of 2')",
"ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype}",
"value: Represent the value to use for the specified operation.",
"note:: This function follows the convention of Numba's :func:`{self._link}`. \"\"\"",
"to disable unrolling. - If an `int`, add ``#pragma unroll(n)``",
"== 'e' and runtime.runtimeGetVersion() < 10000): raise RuntimeError( 'float16 atomic",
"self._op in ('up', 'down'): val_id_t = _cuda_types.uint32 else: val_id_t =",
"will never reach here if (op == 'Add' and ctype.dtype.char",
"gridsize = GridFunc('gridsize') laneid = LaneID() # atomic functions atomic_add",
"import warnings import cupy from cupy_backends.cuda.api import runtime from cupy.cuda",
"add ``#pragma unroll`` directive before the loop. - If `False`,",
"if not (0x0 <= mask.obj <= 0xffffffff): raise ValueError('mask is",
"int, ' f'got {type(unroll).__name__}') if unroll is False: unroll =",
"Args: mask (int): Active threads in a warp. Default is",
"_cuda_types.int32, 'same_kind', env) mask = Data.init(mask, env) code = f'__syncwarp({mask.code})'",
"AtomicOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op self._name =",
"raise ValueError('mask is out of range') mask = _compile._astype_scalar( mask,",
"2, or 3 is allowed. Returns: int or tuple: If",
"int) or isinstance(unroll, bool)): raise TypeError( 'unroll value expected to",
"op == 'CAS': assert value2 is not None # On",
"} \"\"\" return preamble def call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()',",
"is not None: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)",
"call(self, env, array, index, value, value2=None): name = self._name op",
"val_id_t = _cuda_types.uint32 else: val_id_t = _cuda_types.int32 val_id = _compile._astype_scalar(val_id,",
"= args[0] if not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports only",
"dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def",
"= step.obj >= 0 elif step.ctype.dtype.kind == 'u': step_is_positive =",
"'u': step_is_positive = True else: step_is_positive = None stop =",
"Functions`_ for detailed explanation. Args: array: A :class:`cupy.ndarray` to index",
"len(args) == 0: raise TypeError('range expected at least 1 argument,",
"= target.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does",
"ValueError('unsupported function') doc = f\"\"\" {self._desc} Computation of the first",
"_cuda_types.CArray): raise TypeError('len() supports only array type') if not arg.ctype.ndim:",
"def __init__(self, op, dtypes): self._op = op self._name = '__shfl_'",
".. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call(self, env, *,",
"\"\"\" else: # defined in hip/hcc_detail/device_functions.h preamble += \"\"\" return",
"if len(args) < 2: raise TypeError( f'min() expects at least",
"alignment)) env.decls[name] = var env.locals[name] = var return Data(name, _cuda_types.Ptr(child_type))",
"we return a single variable, # otherwise a tuple if",
"= { range: RangeFunc(), len: LenFunc(), min: MinFunc(), max: MaxFunc(),",
"The dimension of the grid. Only 1, 2, or 3",
"and ``z`` attributes are used. Args: ndim (int): The dimension",
"lane ID of the calling thread, ranging in ``[0, jit.warpsize)``.",
"unrolling. - If an `int`, add ``#pragma unroll(n)`` directive before",
"if runtime.is_hip: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) elif",
"unrolling to the compiler (no ``#pragma``). .. seealso:: `#pragma unroll`_",
"for integer type.') if stop.ctype.dtype.kind not in 'iu': raise TypeError('range",
"elif len(args) == 3: start, stop, step = args else:",
"support. Args: start (int): Same as that of built-in :obj:`range`.",
"if not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports only array type')",
"are used. Args: ndim (int): The dimension of the grid.",
"TypeError('len() supports only array type') if not arg.ctype.ndim: raise TypeError('len()",
"be computed. value: Represent the value to use for the",
"return a single variable, # otherwise a tuple if ndim",
"value') unroll = unroll.obj if not (isinstance(unroll, int) or isinstance(unroll,",
"'same_kind', env) mask = Data.init(mask, env) code = f'__syncwarp({mask.code})' else:",
"refer to `Warp Shuffle Functions`_ for detailed explanation. .. _Warp",
"def __call__(self, *args, unroll=None): \"\"\"Range with loop unrolling support. Args:",
"\"delta\" for shfl_{up, down}, \"srcLane\" for shfl, and # \"laneMask\"",
"_cuda_types.int32, 'same_kind', env) width = Data.init(width, env) code = f'{name}({hex(mask)},",
"target.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does not",
"expects at least 2 arguments, got {len(args)}') if kwds: raise",
"gridDim.{n}' else: raise ValueError('unsupported function') doc = f\"\"\" {self._desc} Computation",
"None stop = Data.init(stop, env) start = Data.init(start, env) step",
"dtypes doc = f\"\"\"Calls the ``{self._name}`` function to operate atomically",
"seealso:: `#pragma unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def",
"Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``.",
"call(self, env, *, mask=None): if runtime.is_hip: if mask is not",
"val_id_t = _cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id",
"10010: raise RuntimeError( 'uint16 atomic operation is not supported before",
"first integer is as follows:: {self._eq} and for the other",
"args[0] if not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports only array",
"stop, step and ' 'unroll value') unroll = unroll.obj if",
"env, mask, var, val_id, *, width=None): name = self._name var",
"self._link = 'numba.cuda.grid' self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}'",
"to \"delta\" for shfl_{up, down}, \"srcLane\" for shfl, and #",
"op, dtypes): self._op = op self._name = '__shfl_' + (op",
"grid.' self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x' self._link =",
"jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code = 'blockDim.{n} * gridDim.{n}' else:",
"else '') + 'sync' self._dtypes = dtypes doc = f\"\"\"Calls",
"= _compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id = Data.init(val_id, env) if",
"BuiltinFunc from cupyx.jit._internal_types import Data from cupyx.jit._internal_types import Constant from",
"is 1, an integer is returned, otherwise a tuple. ..",
"else ('float16',))) atomic_sub = AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch =",
"syncthreads = SyncThreads() syncwarp = SyncWarp() shared_memory = SharedMemory() grid",
"env) if width: if isinstance(width, Constant): if width.obj not in",
"unsigned int LaneId() {' if not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419",
"from cupyx.jit._internal_types import Constant from cupyx.jit._internal_types import Range from cupyx.jit",
"= ', '.join(self._code.format(n=n) for n in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim)",
"AtomicOp( 'Xor', ('int32', 'uint32', 'uint64')) # warp-shuffle functions _shfl_dtypes =",
"'float16 atomic operation is not supported before CUDA 10.0.') value",
"{value.code}, {value2.code})' else: assert value2 is None code = f'{name}(&{target.code},",
"ndim == 3: dims = ('x', 'y', 'z') else: raise",
"else ('uint16',))) atomic_and = AtomicOp( 'And', ('int32', 'uint32', 'uint64')) atomic_or",
"(a, b), None, env), args) class SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls",
"be an integer') if runtime.is_hip: warnings.warn(f'mask {mask} is ignored on",
"cupyx.jit._internal_types import Constant from cupyx.jit._internal_types import Range from cupyx.jit import",
"assert False return Range(start, stop, step, ctype, step_is_positive, unroll=unroll) class",
"< 2: raise TypeError( f'min() expects at least 2 arguments,",
"else: width = Constant(64) if runtime.is_hip else Constant(32) width =",
"not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first argument must be",
"< 2: raise TypeError( f'max() expects at least 2 arguments,",
"'uint64') + (() if runtime.is_hip else ('uint16',))) atomic_and = AtomicOp(",
"= AtomicOp( 'Add', ('int32', 'uint32', 'uint64', 'float32', 'float64') + (()",
"raise TypeError('The first argument must be of array type.') target",
"b: _compile._call_ufunc( cupy.minimum, (a, b), None, env), args) class MaxFunc(BuiltinFunc):",
"Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = { range: RangeFunc(), len: LenFunc(), min:",
"step = Data.init(step, env) if start.ctype.dtype.kind not in 'iu': raise",
"if not (isinstance(unroll, int) or isinstance(unroll, bool)): raise TypeError( 'unroll",
"= _cuda_types.Scalar(int) elif env.mode == 'cuda': ctype = stop.ctype else:",
"leave the control of loop unrolling to the compiler (no",
"Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] = var env.locals[name] = var",
"For the case of :obj:`atomic_cas`, this is the value for",
"self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.') try: mask",
"index, env) ctype = target.ctype if ctype.dtype.name not in self._dtypes:",
"raise TypeError('mask must be an integer') if runtime.is_hip: warnings.warn(f'mask {mask}",
"'Xor', ('int32', 'uint32', 'uint64')) # warp-shuffle functions _shfl_dtypes = (",
"explanation. Args: array: A :class:`cupy.ndarray` to index over. index: A",
"= Data.init(mask, env) code = f'__syncwarp({mask.code})' else: code = '__syncwarp()'",
"from cupyx.jit import _cuda_types from cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types",
"\"laneMask\" for shfl_xor if self._op in ('up', 'down'): val_id_t =",
"is not supported before CUDA 10.0.') value = _compile._astype_scalar(value, ctype,",
"var = Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] = var env.locals[name]",
"Same as that of built-in :obj:`range`. unroll (int or bool",
"+= f', {width.code})' return Data(code, ctype) class LaneID(BuiltinFunc): def __call__(self):",
"_cuda_types.uint32 else: val_id_t = _cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind',",
"def call(self, env, *, mask=None): if runtime.is_hip: if mask is",
"('int32', 'uint32', 'uint64')) # warp-shuffle functions _shfl_dtypes = ( ('int32',",
"raise TypeError('keyword arguments are not supported') arg = args[0] if",
"type int, ' f'got {type(unroll).__name__}') if unroll is False: unroll",
"reduce(lambda a, b: _compile._call_ufunc( cupy.minimum, (a, b), None, env), args)",
"atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ = doc def __call__(self, array,",
"RuntimeWarning) elif not (0x0 <= mask <= 0xffffffff): raise ValueError('mask",
"'gridsize': self._desc = 'Compute the grid size.' self._eq = 'jit.blockDim.x",
"'_' if op else '') + 'sync' self._dtypes = dtypes",
"that of built-in :obj:`range`. unroll (int or bool or None):",
"range: RangeFunc(), len: LenFunc(), min: MinFunc(), max: MaxFunc(), } range_",
"start, stop, step = args[0], args[1], Constant(1) elif len(args) ==",
"AtomicOp( 'And', ('int32', 'uint32', 'uint64')) atomic_or = AtomicOp( 'Or', ('int32',",
"'Compute the thread index in the grid.' self._eq = 'jit.threadIdx.x",
"``n`` means the number of iterations to unroll. - If",
"ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self,",
"1, 2, or 3 is allowed. Returns: int or tuple:",
"Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ = doc def __call__(self, mask,",
"``array[index]``. Please refer to `Atomic Functions`_ for detailed explanation. Args:",
"== 1: start, stop, step = Constant(0), args[0], Constant(1) elif",
"step = Constant(0), args[0], Constant(1) elif len(args) == 2: start,",
"Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self, dtype, size, alignment=None): \"\"\"Allocates",
"(int or None): Enforce the alignment via __align__(N). \"\"\" super().__call__()",
"\"\"\"Range with loop unrolling support. Args: start (int): Same as",
"None): - If `True`, add ``#pragma unroll`` directive before the",
"runtime.is_hip else ('float16',))) atomic_sub = AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch",
"in (2, 4, 8, 16, 32): raise ValueError('width needs to",
">= 0 elif step.ctype.dtype.kind == 'u': step_is_positive = True else:",
"convention: for 1D we return a single variable, # otherwise",
"the number of iterations to unroll. - If `None` (default),",
"`int`, add ``#pragma unroll(n)`` directive before the loop, where the",
"dtypes): self._op = op self._name = 'atomic' + op self._dtypes",
"at least 1 argument, got 0') elif len(args) == 1:",
"'e' is not supported and we will never reach here",
"arguments, got {len(args)}') if kwds: raise TypeError('keyword arguments are not",
"'y') elif ndim == 3: dims = ('x', 'y', 'z')",
"if op else '') + 'sync' self._dtypes = dtypes doc",
"the size of static shared memory. If ``None``, declares the",
"def __call__(self, mask, var, val_id, *, width=32): super().__call__() def call(self,",
"operation is not supported before ' 'CUDA 10.1') if int(device.get_compute_capability())",
"<= 0xffffffff): raise ValueError('mask is out of range') # val_id",
"return reduce(lambda a, b: _compile._call_ufunc( cupy.minimum, (a, b), None, env),",
"elif mode == 'gridsize': self._desc = 'Compute the grid size.'",
"'Sub', ('int32', 'uint32')) atomic_exch = AtomicOp( 'Exch', ('int32', 'uint32', 'uint64',",
"= f'{name}(&{target.code}, {value.code})' return Data(code, ctype) class GridFunc(BuiltinFunc): def __init__(self,",
"``{self._name}`` function. Please refer to `Warp Shuffle Functions`_ for detailed",
"if width.obj not in (2, 4, 8, 16, 32): raise",
"' 'unroll value') unroll = unroll.obj if not (isinstance(unroll, int)",
"start, stop, step and ' 'unroll value') unroll = unroll.obj",
"* jit.blockDim.x' self._link = 'numba.cuda.grid' self._code = 'threadIdx.{n} + blockIdx.{n}",
"RuntimeError( 'uint16 atomic operation is not supported before ' 'CUDA",
"= _compile._astype_scalar(value, ctype, 'same_kind', env) value = Data.init(value, env) if",
"ndim == 2: dims = ('x', 'y') elif ndim ==",
"argument, got {len(args)}') if kwds: raise TypeError('keyword arguments are not",
"address to the corresponding array element ``array[index]`` can be computed.",
"Only 1, 2, or 3 is allowed. Returns: int or",
"LaneId() {' if not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble +=",
"len(args) == 1: start, stop, step = Constant(0), args[0], Constant(1)",
"env), args) class SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls ``__syncthreads()``. .. seealso::",
"<= 0xffffffff): raise ValueError('mask is out of range') mask =",
"unroll(1)`` directive before the loop to disable unrolling. - If",
"'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x' self._link = 'numba.cuda.grid' self._code =",
"reach here if ctype.dtype.char == 'H': if runtime.runtimeGetVersion() < 10010:",
"cupyx.jit._internal_types import Range from cupyx.jit import _compile from functools import",
"(_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first argument must be of array",
"functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call(self, env, *, mask=None): if",
"self._dtypes = dtypes doc = f\"\"\"Calls the ``{self._name}`` function to",
"= AtomicOp( 'Or', ('int32', 'uint32', 'uint64')) atomic_xor = AtomicOp( 'Xor',",
"name = env.get_fresh_variable_name(prefix='_smem') # retry var = Data(name, _cuda_types.SharedMem(child_type, size,",
"Data.init(start, env) step = Data.init(step, env) if start.ctype.dtype.kind not in",
"raise TypeError('range expected at least 1 argument, got 0') elif",
"disable unrolling. - If an `int`, add ``#pragma unroll(n)`` directive",
"warnings import cupy from cupy_backends.cuda.api import runtime from cupy.cuda import",
"step, ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def call(self, env, *args,",
"operation is not supported before ' 'sm_70') value2 = _compile._astype_scalar(value2,",
"laneid = LaneID() # atomic functions atomic_add = AtomicOp( 'Add',",
"attributes are used. Args: ndim (int): The dimension of the",
"width, _cuda_types.int32, 'same_kind', env) width = Data.init(width, env) code =",
"https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call(self, env, *, mask=None): if runtime.is_hip:",
"# On HIP, 'e' is not supported and we will",
"(() if runtime.is_hip else ('float16',))) atomic_sub = AtomicOp( 'Sub', ('int32',",
"of built-in :obj:`range`. stop (int): Same as that of built-in",
"Constant(64) if runtime.is_hip else Constant(32) width = _compile._astype_scalar( width, _cuda_types.int32,",
"width = Data.init(width, env) code = f'{name}({hex(mask)}, {var.code}, {val_id.code}' code",
"the first integer is as follows:: {self._eq} and for the",
"(int): The dimension of the grid. Only 1, 2, or",
"the convention of Numba's :func:`{self._link}`. \"\"\" self.__doc__ = doc def",
"https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def call(self, env, *args, unroll=None): if len(args)",
"atomic functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding atomic",
"of static shared memory. If ``None``, declares the shared memory",
"mask.obj except Exception: raise TypeError('mask must be an integer') if",
"to swap to. .. seealso:: `Numba's corresponding atomic functions`_ ..",
"10.1') if int(device.get_compute_capability()) < 70: raise RuntimeError( 'uint16 atomic operation",
"_compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env) width = Data.init(width, env) code",
"class SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args: mask",
"class SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls ``__syncthreads()``. .. seealso:: `Synchronization functions`_",
"value2 = Data.init(value2, env) code = f'{name}(&{target.code}, {value.code}, {value2.code})' else:",
"and for the other two integers the ``y`` and ``z``",
"if runtime.is_hip else ('uint64', 'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync",
"LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns the lane ID of the calling",
"array type') if not arg.ctype.ndim: raise TypeError('len() of unsized array')",
"unrolling requires constant start, stop, step and ' 'unroll value')",
"before CUDA 10.0.') value = _compile._astype_scalar(value, ctype, 'same_kind', env) value",
"integer type.') if stop.ctype.dtype.kind not in 'iu': raise TypeError('range supports",
"functools import reduce class RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None): \"\"\"Range",
"return Data(code, ctype) class LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns the lane",
"as that of built-in :obj:`range`. unroll (int or bool or",
"__init__(self, mode): if mode == 'grid': self._desc = 'Compute the",
"isinstance(mask, Constant): if not (0x0 <= mask.obj <= 0xffffffff): raise",
"' f'got {type(unroll).__name__}') if unroll is False: unroll = 1",
"a property. \"\"\" super().__call__() def _get_preamble(self): preamble = '__device__ __forceinline__",
"b), None, env), args) class MaxFunc(BuiltinFunc): def call(self, env, *args,",
"+ blockIdx.{n} * blockDim.{n}' elif mode == 'gridsize': self._desc =",
"'same_kind', env) value = Data.init(value, env) if op == 'CAS':",
"or None): If ``int`` type, the size of static shared",
"ndim == 1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim == 2:",
"child_type = _cuda_types.Scalar(dtype) while env[name] is not None: name =",
"atomic_cas = AtomicOp( 'CAS', ('int32', 'uint32', 'uint64') + (() if",
"call(self, env, mask, var, val_id, *, width=None): name = self._name",
"dtypes doc = f\"\"\"Calls the ``{self._name}`` function. Please refer to",
"AtomicOp( 'Exch', ('int32', 'uint32', 'uint64', 'float32')) atomic_min = AtomicOp( 'Min',",
"__init__(self, op, dtypes): self._op = op self._name = '__shfl_' +",
"code = f'{name}(&{target.code}, {value.code})' return Data(code, ctype) class GridFunc(BuiltinFunc): def",
"before the loop, where the integer ``n`` means the number",
"'unroll value expected to be of type int, ' f'got",
"index: A valid index such that the address to the",
":obj:`atomic_cas`, this is the value for ``array[index]`` to compare with.",
"mask is not None: warnings.warn(f'mask {mask} is ignored on HIP',",
"from cupy_backends.cuda.api import runtime from cupy.cuda import device from cupyx.jit",
"value for ``array[index]`` to compare with. alt_value: Only used in",
"= dtypes doc = f\"\"\"Calls the ``{self._name}`` function. Please refer",
"functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding atomic functions:",
"cupy.maximum, (a, b), None, env), args) class SyncThreads(BuiltinFunc): def __call__(self):",
"preamble += \"\"\" return __lane_id(); } \"\"\" return preamble def",
"elif not (0x0 <= mask <= 0xffffffff): raise ValueError('mask is",
"array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self, env,",
"on HIP', RuntimeWarning) elif not (0x0 <= mask <= 0xffffffff):",
"bool or None): - If `True`, add ``#pragma unroll`` directive",
"atomic functions atomic_add = AtomicOp( 'Add', ('int32', 'uint32', 'uint64', 'float32',",
"explanation. .. _Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ = doc",
".. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call_const(self, env): return",
"{ range: RangeFunc(), len: LenFunc(), min: MinFunc(), max: MaxFunc(), }",
"warnings.warn( 'loop unrolling is ignored as the unroll value is",
"an integer') if runtime.is_hip: warnings.warn(f'mask {mask} is ignored on HIP',",
"to `Warp Shuffle Functions`_ for detailed explanation. .. _Warp Shuffle",
"_compile._indexing(array, index, env) ctype = target.ctype if ctype.dtype.name not in",
"kwds: raise TypeError('keyword arguments are not supported') return reduce(lambda a,",
"class MinFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) <",
"= GridFunc('gridsize') laneid = LaneID() # atomic functions atomic_add =",
"unroll (int or bool or None): - If `True`, add",
"if self._op in ('up', 'down'): val_id_t = _cuda_types.uint32 else: val_id_t",
"Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim == 2: dims = ('x', 'y')",
"is not None: if not all(isinstance(x, Constant) for x in",
"than INT_MAX') if isinstance(step, Constant): step_is_positive = step.obj >= 0",
"'numba.cuda.grid' self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}' elif mode",
"of the first integer is as follows:: {self._eq} and for",
"raise TypeError( 'unroll value expected to be of type int,",
"jit.blockIdx.x * jit.blockDim.x' self._link = 'numba.cuda.grid' self._code = 'threadIdx.{n} +",
"self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x' self._link = 'numba.cuda.grid'",
"*, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args: mask (int): Active threads in",
"width.obj not in (2, 4, 8, 16, 32): raise ValueError('width",
"f\"\"\"Calls the ``{self._name}`` function. Please refer to `Warp Shuffle Functions`_",
"== 'u': step_is_positive = True else: step_is_positive = None stop",
"def __call__(self, array, index, value, alt_value=None): super().__call__() def call(self, env,",
"< 10000): raise RuntimeError( 'float16 atomic operation is not supported",
"loop unrolling to the compiler (no ``#pragma``). .. seealso:: `#pragma",
"{width.code})' return Data(code, ctype) class LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns the",
"op, dtypes): self._op = op self._name = 'atomic' + op",
"use for the specified operation. For the case of :obj:`atomic_cas`,",
"Data(code, ctype) class GridFunc(BuiltinFunc): def __init__(self, mode): if mode ==",
"unroll)): raise TypeError( 'loop unrolling requires constant start, stop, step",
"a tuple. .. note:: This function follows the convention of",
"not None: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) mask",
"('int32', 'uint32', 'uint64')) atomic_max = AtomicOp( 'Max', ('int32', 'uint32', 'uint64'))",
"'float64') + (() if runtime.is_hip else ('float16',))) atomic_sub = AtomicOp(",
"integer is as follows:: {self._eq} and for the other two",
"Data.init(mask, env) code = f'__syncwarp({mask.code})' else: code = '__syncwarp()' return",
"the value to use for the specified operation. For the",
"if not arg.ctype.ndim: raise TypeError('len() of unsized array') return Data(f'static_cast<long",
"Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ = doc def __call__(self, mask, var,",
"or None): - If `True`, add ``#pragma unroll`` directive before",
"raise TypeError('range supports only for integer type.') if step.ctype.dtype.kind not",
"'CUDA 10.1') if int(device.get_compute_capability()) < 70: raise RuntimeError( 'uint16 atomic",
"(default), leave the control of loop unrolling to the compiler",
"= Constant(64) if runtime.is_hip else Constant(32) width = _compile._astype_scalar( width,",
"not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\" unsigned int",
"\"=r\"(ret) ); return ret; } \"\"\" else: # defined in",
"= AtomicOp( 'Exch', ('int32', 'uint32', 'uint64', 'float32')) atomic_min = AtomicOp(",
"('x', 'y', 'z') else: raise ValueError('Only ndim=1,2,3 are supported') elts_code",
"``__syncthreads()``. .. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\"",
"min: MinFunc(), max: MaxFunc(), } range_ = RangeFunc() syncthreads =",
"returns it as a 1-D array. Args: dtype (dtype): The",
"'Add' and ctype.dtype.char == 'e' and runtime.runtimeGetVersion() < 10000): raise",
"memory. If ``None``, declares the shared memory with extern specifier.",
"for 1D we return a single variable, # otherwise a",
"Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__",
"the thread index in the grid.' self._eq = 'jit.threadIdx.x +",
"cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types import Data from cupyx.jit._internal_types import",
"env) code = f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert value2 is",
"via __align__(N). \"\"\" super().__call__() def call_const(self, env, dtype, size, alignment=None):",
"Enforce the alignment via __align__(N). \"\"\" super().__call__() def call_const(self, env,",
"<< 31): warnings.warn( 'loop unrolling is ignored as the unroll",
"Data.init(value2, env) code = f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert value2",
"a callable function instead of a property. \"\"\" super().__call__() def",
"arguments are not supported') arg = args[0] if not isinstance(arg.ctype,",
"'y', 'z') else: raise ValueError('Only ndim=1,2,3 are supported') elts_code =",
"if start.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for",
"grid. Only 1, 2, or 3 is allowed. Returns: int",
": \"=r\"(ret) ); return ret; } \"\"\" else: # defined",
"computed. value: Represent the value to use for the specified",
"self._desc = 'Compute the grid size.' self._eq = 'jit.blockDim.x *",
"unroll = unroll.obj if not (isinstance(unroll, int) or isinstance(unroll, bool)):",
"runtime.runtimeGetVersion() < 10010: raise RuntimeError( 'uint16 atomic operation is not",
"class SharedMemory(BuiltinFunc): def __call__(self, dtype, size, alignment=None): \"\"\"Allocates shared memory",
".. seealso:: `#pragma unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__()",
"env) value = Data.init(value, env) if op == 'CAS': assert",
"reduce(lambda a, b: _compile._call_ufunc( cupy.maximum, (a, b), None, env), args)",
"LaneID() # atomic functions atomic_add = AtomicOp( 'Add', ('int32', 'uint32',",
"to be power of 2') else: width = Constant(64) if",
"mask: if isinstance(mask, Constant): if not (0x0 <= mask.obj <=",
"arg = args[0] if not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len() supports",
"raise TypeError( f'max() expects at least 2 arguments, got {len(args)}')",
"# On HIP, 'H' is not supported and we will",
"greater than INT_MAX') if isinstance(step, Constant): step_is_positive = step.obj >=",
"INT_MAX') if isinstance(step, Constant): step_is_positive = step.obj >= 0 elif",
"= AtomicOp( 'Inc', ('uint32',)) atomic_dec = AtomicOp( 'Dec', ('uint32',)) atomic_cas",
"None: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) mask =",
"in 'iu': raise TypeError('range supports only for integer type.') if",
"\"\"\" self.__doc__ = doc def __call__(self, mask, var, val_id, *,",
"array element ``array[index]`` can be computed. value: Represent the value",
"_compile._call_ufunc( cupy.maximum, (a, b), None, env), args) class SyncThreads(BuiltinFunc): def",
"HIP, 'H' is not supported and we will never reach",
"in ('up', 'down'): val_id_t = _cuda_types.uint32 else: val_id_t = _cuda_types.int32",
"(() if runtime.is_hip else ('uint64', 'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes)",
"step_is_positive = step.obj >= 0 elif step.ctype.dtype.kind == 'u': step_is_positive",
"_compile._astype_scalar(value2, ctype, 'same_kind', env) value2 = Data.init(value2, env) code =",
"= AtomicOp( 'Xor', ('int32', 'uint32', 'uint64')) # warp-shuffle functions _shfl_dtypes",
"runtime.is_hip else ('uint64', 'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync =",
"ValueError('Only ndim=1,2,3 are supported') elts_code = ', '.join(self._code.format(n=n) for n",
"blockDim.{n}' elif mode == 'gridsize': self._desc = 'Compute the grid",
"'uint64')) # warp-shuffle functions _shfl_dtypes = ( ('int32', 'uint32', 'int64',",
"_cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] = var env.locals[name] = var return",
"is ignored as the unroll value is ' 'non-positive or",
"None # On HIP, 'H' is not supported and we",
"\"\"\" super().__call__() def call_const(self, env): return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc):",
".. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def call(self, env, *args,",
"'same_kind', env) width = Data.init(width, env) code = f'{name}({hex(mask)}, {var.code},",
"None if mask: if isinstance(mask, Constant): if not (0x0 <=",
"shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes) shfl_xor_sync =",
"self.__doc__ = doc def __call__(self, ndim): super().__call__() def call_const(self, env,",
"__call__(self, *, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args: mask (int): Active threads",
"size of static shared memory. If ``None``, declares the shared",
"'Compute the grid size.' self._eq = 'jit.blockDim.x * jit.gridDim.x' self._link",
"shared memory with extern specifier. alignment (int or None): Enforce",
"None): If ``int`` type, the size of static shared memory.",
"0 < unroll < 1 << 31): warnings.warn( 'loop unrolling",
"must be an integer') # Numba convention: for 1D we",
"+= \"\"\" return __lane_id(); } \"\"\" return preamble def call_const(self,",
"31): warnings.warn( 'loop unrolling is ignored as the unroll value",
"'threadIdx.{n} + blockIdx.{n} * blockDim.{n}' elif mode == 'gridsize': self._desc",
"specified operation. For the case of :obj:`atomic_cas`, this is the",
"= doc def __call__(self, mask, var, val_id, *, width=32): super().__call__()",
"env, *args, **kwds): if len(args) < 2: raise TypeError( f'min()",
"env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = { range: RangeFunc(), len:",
"expects only 1 argument, got {len(args)}') if kwds: raise TypeError('keyword",
"supported and we will never reach here if ctype.dtype.char ==",
"+ (() if runtime.is_hip else ('uint16',))) atomic_and = AtomicOp( 'And',",
"raise ValueError('Only ndim=1,2,3 are supported') elts_code = ', '.join(self._code.format(n=n) for",
"in the grid.' self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x'",
"# warp-shuffle functions _shfl_dtypes = ( ('int32', 'uint32', 'int64', 'float32',",
"'uint32', 'int64', 'float32', 'float64') + (() if runtime.is_hip else ('uint64',",
"= f\"\"\"Calls the ``{self._name}`` function to operate atomically on ``array[index]``.",
"for detailed explanation. .. _Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__",
"for integer type.') if env.mode == 'numpy': ctype = _cuda_types.Scalar(int)",
"ctype = target.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}`",
"atomic_inc = AtomicOp( 'Inc', ('uint32',)) atomic_dec = AtomicOp( 'Dec', ('uint32',))",
"'z') else: raise ValueError('Only ndim=1,2,3 are supported') elts_code = ',",
"ignored on HIP', RuntimeWarning) elif not (0x0 <= mask <=",
"tuple. .. note:: This function follows the convention of Numba's",
"built-in :obj:`range`. stop (int): Same as that of built-in :obj:`range`.",
"= Data.init(val_id, env) if width: if isinstance(width, Constant): if width.obj",
"functions _shfl_dtypes = ( ('int32', 'uint32', 'int64', 'float32', 'float64') +",
"'uint16 atomic operation is not supported before ' 'CUDA 10.1')",
"AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch = AtomicOp( 'Exch', ('int32', 'uint32',",
"functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ = doc def __call__(self, array, index,",
"f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert value2 is None code =",
"doc def __call__(self, mask, var, val_id, *, width=32): super().__call__() def",
"'uint32', 'uint64')) atomic_max = AtomicOp( 'Max', ('int32', 'uint32', 'uint64')) atomic_inc",
"runtime.is_hip else Constant(32) width = _compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env)",
"f'min() expects at least 2 arguments, got {len(args)}') if kwds:",
"supports only for integer type.') if stop.ctype.dtype.kind not in 'iu':",
"isinstance(width, Constant): if width.obj not in (2, 4, 8, 16,",
"env[name] is not None: name = env.get_fresh_variable_name(prefix='_smem') # retry var",
"call(self, env, *args, **kwds): if len(args) != 1: raise TypeError(f'len()",
"_cuda_types from cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types import Data from",
"2: raise TypeError( f'max() expects at least 2 arguments, got",
"= unroll.obj if not (isinstance(unroll, int) or isinstance(unroll, bool)): raise",
"b), None, env), args) class SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls ``__syncthreads()``.",
"is not supported before ' 'sm_70') value2 = _compile._astype_scalar(value2, ctype,",
"runtime.is_hip: if mask is not None: warnings.warn(f'mask {mask} is ignored",
"allowed. Returns: int or tuple: If ``ndim`` is 1, an",
"int or tuple: If ``ndim`` is 1, an integer is",
"'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync",
"and we will never reach here if (op == 'Add'",
"for ``array[index]`` to compare with. alt_value: Only used in :obj:`atomic_cas`",
"f'{name}({hex(mask)}, {var.code}, {val_id.code}' code += f', {width.code})' return Data(code, ctype)",
"return preamble def call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict",
"the compiler (no ``#pragma``). .. seealso:: `#pragma unroll`_ .. _#pragma",
"step_is_positive = None stop = Data.init(stop, env) start = Data.init(start,",
"for detailed explanation. Args: array: A :class:`cupy.ndarray` to index over.",
"of range') # val_id refers to \"delta\" for shfl_{up, down},",
"raise TypeError('len() of unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class",
"+ op self._dtypes = dtypes doc = f\"\"\"Calls the ``{self._name}``",
"'Add', ('int32', 'uint32', 'uint64', 'float32', 'float64') + (() if runtime.is_hip",
"to compare with. alt_value: Only used in :obj:`atomic_cas` to represent",
"if len(args) != 1: raise TypeError(f'len() expects only 1 argument,",
"supports only array type') if not arg.ctype.ndim: raise TypeError('len() of",
"raise TypeError(f'`{name}` does not support {ctype.dtype} input.') # On HIP,",
"elif ndim == 3: dims = ('x', 'y', 'z') else:",
"`None` (default), leave the control of loop unrolling to the",
"doc def __call__(self, ndim): super().__call__() def call_const(self, env, ndim): if",
"not (isinstance(unroll, int) or isinstance(unroll, bool)): raise TypeError( 'unroll value",
"code = f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert value2 is None",
"in self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.') try:",
"def __call__(self, *, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args: mask (int): Active",
"1 argument, got 0') elif len(args) == 1: start, stop,",
"if kwds: raise TypeError('keyword arguments are not supported') return reduce(lambda",
"= self._name var = Data.init(var, env) ctype = var.ctype if",
"Please refer to `Atomic Functions`_ for detailed explanation. Args: array:",
"if runtime.is_hip else Constant(32) width = _compile._astype_scalar( width, _cuda_types.int32, 'same_kind',",
"'Max', ('int32', 'uint32', 'uint64')) atomic_inc = AtomicOp( 'Inc', ('uint32',)) atomic_dec",
"runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\" unsigned int ret;",
"other two integers the ``y`` and ``z`` attributes are used.",
"Data.init(width, env) code = f'{name}({hex(mask)}, {var.code}, {val_id.code}' code += f',",
"raise TypeError('range supports only for integer type.') if stop.ctype.dtype.kind not",
"*args, **kwds): if len(args) != 1: raise TypeError(f'len() expects only",
"a warp. Default is 0xffffffff. .. seealso:: `Synchronization functions`_ ..",
"AtomicOp( 'Add', ('int32', 'uint32', 'uint64', 'float32', 'float64') + (() if",
"GridFunc('gridsize') laneid = LaneID() # atomic functions atomic_add = AtomicOp(",
"def call(self, env, *args, unroll=None): if len(args) == 0: raise",
"is not supported before ' 'CUDA 10.1') if int(device.get_compute_capability()) <",
"ctype = var.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}`",
"is a callable function instead of a property. \"\"\" super().__call__()",
"type, the size of static shared memory. If ``None``, declares",
"ctype) class LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns the lane ID of",
"the corresponding array element ``array[index]`` can be computed. value: Represent",
"name = env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while env[name] is not",
"'loop unrolling is ignored as the unroll value is '",
"stop (int): Same as that of built-in :obj:`range`. step (int):",
"class GridFunc(BuiltinFunc): def __init__(self, mode): if mode == 'grid': self._desc",
"https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call_const(self, env): return Data('__syncthreads()', _cuda_types.void) class",
"``{self._name}`` function to operate atomically on ``array[index]``. Please refer to",
"env) ctype = target.ctype if ctype.dtype.name not in self._dtypes: raise",
"it as a 1-D array. Args: dtype (dtype): The dtype",
"'Inc', ('uint32',)) atomic_dec = AtomicOp( 'Dec', ('uint32',)) atomic_cas = AtomicOp(",
"alt_value=None): super().__call__() def call(self, env, array, index, value, value2=None): name",
"= Data.init(stop, env) start = Data.init(start, env) step = Data.init(step,",
".. note:: Unlike :obj:`numba.cuda.laneid`, this is a callable function instead",
"else: assert value2 is None code = f'{name}(&{target.code}, {value.code})' return",
"is ' 'non-positive or greater than INT_MAX') if isinstance(step, Constant):",
"_compile._call_ufunc( cupy.minimum, (a, b), None, env), args) class MaxFunc(BuiltinFunc): def",
"assert value2 is None code = f'{name}(&{target.code}, {value.code})' return Data(code,",
"raise TypeError( f'min() expects at least 2 arguments, got {len(args)}')",
"Unlike :obj:`numba.cuda.laneid`, this is a callable function instead of a",
"follows the convention of Numba's :func:`{self._link}`. \"\"\" self.__doc__ = doc",
"val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id = Data.init(val_id, env)",
"len: LenFunc(), min: MinFunc(), max: MaxFunc(), } range_ = RangeFunc()",
"or None): Enforce the alignment via __align__(N). \"\"\" super().__call__() def",
"atomic_min = AtomicOp( 'Min', ('int32', 'uint32', 'uint64')) atomic_max = AtomicOp(",
"1, an integer is returned, otherwise a tuple. .. note::",
"support {ctype.dtype} input.') # On HIP, 'e' is not supported",
"\"srcLane\" for shfl, and # \"laneMask\" for shfl_xor if self._op",
"if len(args) == 0: raise TypeError('range expected at least 1",
"the value for ``array[index]`` to compare with. alt_value: Only used",
":func:`{self._link}`. \"\"\" self.__doc__ = doc def __call__(self, ndim): super().__call__() def",
".. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations",
"TypeError( 'unroll value expected to be of type int, '",
"if (op == 'Add' and ctype.dtype.char == 'e' and runtime.runtimeGetVersion()",
"used. Args: ndim (int): The dimension of the grid. Only",
"or tuple: If ``ndim`` is 1, an integer is returned,",
"width = Constant(64) if runtime.is_hip else Constant(32) width = _compile._astype_scalar(",
"{mask} is ignored on HIP', RuntimeWarning) mask = None if",
"a, b: _compile._call_ufunc( cupy.maximum, (a, b), None, env), args) class",
"If ``ndim`` is 1, an integer is returned, otherwise a",
"1 argument, got {len(args)}') if kwds: raise TypeError('keyword arguments are",
"`Atomic Functions`_ for detailed explanation. Args: array: A :class:`cupy.ndarray` to",
"ctype = _cuda_types.Scalar(int) elif env.mode == 'cuda': ctype = stop.ctype",
"must be an integer') if runtime.is_hip: warnings.warn(f'mask {mask} is ignored",
"f'{name}(&{target.code}, {value.code})' return Data(code, ctype) class GridFunc(BuiltinFunc): def __init__(self, mode):",
"super().__call__() def call(self, env, *, mask=None): if runtime.is_hip: if mask",
"op = self._op array = Data.init(array, env) if not isinstance(array.ctype,",
"_Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ = doc def",
"not supported and we will never reach here if (op",
":class:`cupy.ndarray` to index over. index: A valid index such that",
"power of 2') else: width = Constant(64) if runtime.is_hip else",
"{ctype.dtype} input.') try: mask = mask.obj except Exception: raise TypeError('mask",
"mask, _cuda_types.int32, 'same_kind', env) mask = Data.init(mask, env) code =",
"= SharedMemory() grid = GridFunc('grid') gridsize = GridFunc('gridsize') laneid =",
"jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`, this is a callable function",
"if kwds: raise TypeError('keyword arguments are not supported') arg =",
"def __call__(self, dtype, size, alignment=None): \"\"\"Allocates shared memory and returns",
"doc = f\"\"\" {self._desc} Computation of the first integer is",
"if len(args) < 2: raise TypeError( f'max() expects at least",
"on ``array[index]``. Please refer to `Atomic Functions`_ for detailed explanation.",
"env): return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff):",
"shared_memory = SharedMemory() grid = GridFunc('grid') gridsize = GridFunc('gridsize') laneid",
"all(isinstance(x, Constant) for x in (start, stop, step, unroll)): raise",
"TypeError(f'len() expects only 1 argument, got {len(args)}') if kwds: raise",
"('int32', 'uint32', 'uint64') + (() if runtime.is_hip else ('uint16',))) atomic_and",
"' 'non-positive or greater than INT_MAX') if isinstance(step, Constant): step_is_positive",
"env, dtype, size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype)",
"_cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op",
"op self._name = '__shfl_' + (op + '_' if op",
"RangeFunc(), len: LenFunc(), min: MinFunc(), max: MaxFunc(), } range_ =",
"isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first argument must be of",
"ndim (int): The dimension of the grid. Only 1, 2,",
"f', {width.code})' return Data(code, ctype) class LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns",
"3: start, stop, step = args else: raise TypeError( f'range",
"for shfl_{up, down}, \"srcLane\" for shfl, and # \"laneMask\" for",
"function to operate atomically on ``array[index]``. Please refer to `Atomic",
"def call_const(self, env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = {",
"(0x0 <= mask <= 0xffffffff): raise ValueError('mask is out of",
"RuntimeError( 'float16 atomic operation is not supported before CUDA 10.0.')",
"or 0 < unroll < 1 << 31): warnings.warn( 'loop",
"= f'__syncwarp({mask.code})' else: code = '__syncwarp()' return Data(code, _cuda_types.void) class",
"device from cupyx.jit import _cuda_types from cupyx.jit._internal_types import BuiltinFunc from",
"`Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call(self,",
"start.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for integer",
"dtype, size, alignment=None): \"\"\"Allocates shared memory and returns it as",
"the integer ``n`` means the number of iterations to unroll.",
"retry var = Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] = var",
"bool)): raise TypeError( 'unroll value expected to be of type",
"corresponding array element ``array[index]`` can be computed. value: Represent the",
"not support {ctype.dtype} input.') try: mask = mask.obj except Exception:",
"syncwarp = SyncWarp() shared_memory = SharedMemory() grid = GridFunc('grid') gridsize",
"return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self, dtype, size, alignment=None):",
"Constant(0), args[0], Constant(1) elif len(args) == 2: start, stop, step",
"def call(self, env, *args, **kwds): if len(args) < 2: raise",
"var, val_id, *, width=32): super().__call__() def call(self, env, mask, var,",
"== 'Add' and ctype.dtype.char == 'e' and runtime.runtimeGetVersion() < 10000):",
"not (0x0 <= mask <= 0xffffffff): raise ValueError('mask is out",
"ValueError('mask is out of range') mask = _compile._astype_scalar( mask, _cuda_types.int32,",
"elif len(args) == 1: start, stop, step = Constant(0), args[0],",
"TypeError('keyword arguments are not supported') return reduce(lambda a, b: _compile._call_ufunc(",
"Args: dtype (dtype): The dtype of the returned array. size",
"self._name var = Data.init(var, env) ctype = var.ctype if ctype.dtype.name",
"runtime.is_hip: warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning) elif not",
"'same_kind', env) val_id = Data.init(val_id, env) if width: if isinstance(width,",
"class MaxFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) <",
"cupy_backends.cuda.api import runtime from cupy.cuda import device from cupyx.jit import",
"< unroll < 1 << 31): warnings.warn( 'loop unrolling is",
"class LenFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) !=",
"= _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self, op,",
"refer to `Atomic Functions`_ for detailed explanation. Args: array: A",
"raise TypeError(f'len() expects only 1 argument, got {len(args)}') if kwds:",
"argument, got 0') elif len(args) == 1: start, stop, step",
"= _cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env) val_id =",
"0xffffffff): raise ValueError('mask is out of range') # val_id refers",
"= args else: raise TypeError( f'range expected at most 3",
"env) step = Data.init(step, env) if start.ctype.dtype.kind not in 'iu':",
"built-in :obj:`range`. unroll (int or bool or None): - If",
"'same_kind', env) value2 = Data.init(value2, env) code = f'{name}(&{target.code}, {value.code},",
"mode == 'grid': self._desc = 'Compute the thread index in",
"stop, step = args else: raise TypeError( f'range expected at",
"= Data.init(value2, env) code = f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert",
"SharedMemory() grid = GridFunc('grid') gridsize = GridFunc('gridsize') laneid = LaneID()",
"the loop to disable unrolling. - If an `int`, add",
"len(args) < 2: raise TypeError( f'min() expects at least 2",
"``None``, declares the shared memory with extern specifier. alignment (int",
"supported before CUDA 10.0.') value = _compile._astype_scalar(value, ctype, 'same_kind', env)",
"self._link = 'numba.cuda.gridsize' self._code = 'blockDim.{n} * gridDim.{n}' else: raise",
"SyncThreads() syncwarp = SyncWarp() shared_memory = SharedMemory() grid = GridFunc('grid')",
"follows:: {self._eq} and for the other two integers the ``y``",
"Default is 0xffffffff. .. seealso:: `Synchronization functions`_ .. _Synchronization functions:",
"compiler (no ``#pragma``). .. seealso:: `#pragma unroll`_ .. _#pragma unroll:",
"SharedMemory(BuiltinFunc): def __call__(self, dtype, size, alignment=None): \"\"\"Allocates shared memory and",
"see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\" unsigned int ret; asm (\"mov.u32",
"calling thread, ranging in ``[0, jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`,",
"runtime from cupy.cuda import device from cupyx.jit import _cuda_types from",
"CUDA 10.0.') value = _compile._astype_scalar(value, ctype, 'same_kind', env) value =",
"if not (unroll is True or 0 < unroll <",
"# otherwise a tuple if ndim == 1: return Data(self._code.format(n='x'),",
"if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first argument must",
"RangeFunc() syncthreads = SyncThreads() syncwarp = SyncWarp() shared_memory = SharedMemory()",
"Constant): if not (0x0 <= mask.obj <= 0xffffffff): raise ValueError('mask",
"0 elif step.ctype.dtype.kind == 'u': step_is_positive = True else: step_is_positive",
"val_id_t, 'same_kind', env) val_id = Data.init(val_id, env) if width: if",
"import Constant from cupyx.jit._internal_types import Range from cupyx.jit import _compile",
"_cuda_types.Scalar(dtype) while env[name] is not None: name = env.get_fresh_variable_name(prefix='_smem') #",
"32): raise ValueError('width needs to be power of 2') else:",
"val_id refers to \"delta\" for shfl_{up, down}, \"srcLane\" for shfl,",
"else ('uint64', 'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up',",
"== 3: start, stop, step = args else: raise TypeError(",
"unroll=unroll) class LenFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args)",
"in :obj:`atomic_cas` to represent the value to swap to. ..",
"ctype, 'same_kind', env) value = Data.init(value, env) if op ==",
"unroll=None): \"\"\"Range with loop unrolling support. Args: start (int): Same",
"the case of :obj:`atomic_cas`, this is the value for ``array[index]``",
"shfl_xor if self._op in ('up', 'down'): val_id_t = _cuda_types.uint32 else:",
"' 'sm_70') value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env) value2 =",
"never reach here if ctype.dtype.char == 'H': if runtime.runtimeGetVersion() <",
"None code = f'{name}(&{target.code}, {value.code})' return Data(code, ctype) class GridFunc(BuiltinFunc):",
"or 3 is allowed. Returns: int or tuple: If ``ndim``",
"1 if not (unroll is True or 0 < unroll",
"step = args else: raise TypeError( f'range expected at most",
"seealso:: `Numba's corresponding atomic functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions ..",
"and # \"laneMask\" for shfl_xor if self._op in ('up', 'down'):",
"Data from cupyx.jit._internal_types import Constant from cupyx.jit._internal_types import Range from",
"# atomic functions atomic_add = AtomicOp( 'Add', ('int32', 'uint32', 'uint64',",
"least 2 arguments, got {len(args)}') if kwds: raise TypeError('keyword arguments",
"this is a callable function instead of a property. \"\"\"",
"(int): Same as that of built-in :obj:`range`. stop (int): Same",
".. note:: This function follows the convention of Numba's :func:`{self._link}`.",
"raise TypeError(f'`{name}` does not support {ctype.dtype} input.') try: mask =",
"If `False`, add ``#pragma unroll(1)`` directive before the loop to",
"len(args) < 2: raise TypeError( f'max() expects at least 2",
"# val_id refers to \"delta\" for shfl_{up, down}, \"srcLane\" for",
"b: _compile._call_ufunc( cupy.maximum, (a, b), None, env), args) class SyncThreads(BuiltinFunc):",
"not arg.ctype.ndim: raise TypeError('len() of unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])',",
"most 3 argument, got {len(args)}') if unroll is not None:",
"+ '_' if op else '') + 'sync' self._dtypes =",
"not (unroll is True or 0 < unroll < 1",
"the grid. Only 1, 2, or 3 is allowed. Returns:",
"raise ValueError('width needs to be power of 2') else: width",
"variable, # otherwise a tuple if ndim == 1: return",
"(start, stop, step, unroll)): raise TypeError( 'loop unrolling requires constant",
"== 'H': if runtime.runtimeGetVersion() < 10010: raise RuntimeError( 'uint16 atomic",
"the loop. - If `False`, add ``#pragma unroll(1)`` directive before",
"(op == 'Add' and ctype.dtype.char == 'e' and runtime.runtimeGetVersion() <",
"self._code = 'blockDim.{n} * gridDim.{n}' else: raise ValueError('unsupported function') doc",
"call_const(self, env, dtype, size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type =",
"Args: ndim (int): The dimension of the grid. Only 1,",
":obj:`range`. step (int): Same as that of built-in :obj:`range`. unroll",
"elif env.mode == 'cuda': ctype = stop.ctype else: assert False",
"if unroll is False: unroll = 1 if not (unroll",
"AtomicOp( 'Dec', ('uint32',)) atomic_cas = AtomicOp( 'CAS', ('int32', 'uint32', 'uint64')",
"' 'CUDA 10.1') if int(device.get_compute_capability()) < 70: raise RuntimeError( 'uint16",
"seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def",
"the specified operation. For the case of :obj:`atomic_cas`, this is",
"the unroll value is ' 'non-positive or greater than INT_MAX')",
"of Numba's :func:`{self._link}`. \"\"\" self.__doc__ = doc def __call__(self, ndim):",
"index, value, value2=None): name = self._name op = self._op array",
"the calling thread, ranging in ``[0, jit.warpsize)``. .. note:: Unlike",
"return Data(code, ctype) class GridFunc(BuiltinFunc): def __init__(self, mode): if mode",
"'__shfl_' + (op + '_' if op else '') +",
"if mode == 'grid': self._desc = 'Compute the thread index",
"def __call__(self): \"\"\"Calls ``__syncthreads()``. .. seealso:: `Synchronization functions`_ .. _Synchronization",
"( ('int32', 'uint32', 'int64', 'float32', 'float64') + (() if runtime.is_hip",
"in (start, stop, step, unroll)): raise TypeError( 'loop unrolling requires",
"if env.mode == 'numpy': ctype = _cuda_types.Scalar(int) elif env.mode ==",
"atomic_xor = AtomicOp( 'Xor', ('int32', 'uint32', 'uint64')) # warp-shuffle functions",
"= AtomicOp( 'Max', ('int32', 'uint32', 'uint64')) atomic_inc = AtomicOp( 'Inc',",
"be of type int, ' f'got {type(unroll).__name__}') if unroll is",
"2: dims = ('x', 'y') elif ndim == 3: dims",
"corresponding atomic functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding",
"integers the ``y`` and ``z`` attributes are used. Args: ndim",
"asm (\"mov.u32 %0, %%laneid;\" : \"=r\"(ret) ); return ret; }",
"Returns: int or tuple: If ``ndim`` is 1, an integer",
"ignored as the unroll value is ' 'non-positive or greater",
"0xffffffff. .. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\"",
"otherwise a tuple if ndim == 1: return Data(self._code.format(n='x'), _cuda_types.uint32)",
"__align__(N). \"\"\" super().__call__() def call_const(self, env, dtype, size, alignment=None): name",
"long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self, env, *args, **kwds): if",
"= AtomicOp( 'And', ('int32', 'uint32', 'uint64')) atomic_or = AtomicOp( 'Or',",
"only for integer type.') if step.ctype.dtype.kind not in 'iu': raise",
"array, index, value, alt_value=None): super().__call__() def call(self, env, array, index,",
"elif len(args) == 2: start, stop, step = args[0], args[1],",
"_compile._astype_scalar(value, ctype, 'same_kind', env) value = Data.init(value, env) if op",
"(int or None): If ``int`` type, the size of static",
"4, 8, 16, 32): raise ValueError('width needs to be power",
"('int32', 'uint32', 'uint64')) atomic_or = AtomicOp( 'Or', ('int32', 'uint32', 'uint64'))",
"operation is not supported before CUDA 10.0.') value = _compile._astype_scalar(value,",
"start (int): Same as that of built-in :obj:`range`. stop (int):",
"supports only for integer type.') if env.mode == 'numpy': ctype",
"Args: start (int): Same as that of built-in :obj:`range`. stop",
":obj:`atomic_cas` to represent the value to swap to. .. seealso::",
"``ndim`` is 1, an integer is returned, otherwise a tuple.",
"0') elif len(args) == 1: start, stop, step = Constant(0),",
"shared memory. If ``None``, declares the shared memory with extern",
"not supported') arg = args[0] if not isinstance(arg.ctype, _cuda_types.CArray): raise",
"= stop.ctype else: assert False return Range(start, stop, step, ctype,",
"swap to. .. seealso:: `Numba's corresponding atomic functions`_ .. _Atomic",
"10000): raise RuntimeError( 'float16 atomic operation is not supported before",
"from cupyx.jit import _compile from functools import reduce class RangeFunc(BuiltinFunc):",
"= Data.init(array, env) if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The",
"\"\"\" super().__call__() def call(self, env, *, mask=None): if runtime.is_hip: if",
"unroll(n)`` directive before the loop, where the integer ``n`` means",
"``#pragma unroll(n)`` directive before the loop, where the integer ``n``",
"else: step_is_positive = None stop = Data.init(stop, env) start =",
"var return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self, op, dtypes):",
"means the number of iterations to unroll. - If `None`",
"or bool or None): - If `True`, add ``#pragma unroll``",
"'sync' self._dtypes = dtypes doc = f\"\"\"Calls the ``{self._name}`` function.",
"AtomicOp( 'Max', ('int32', 'uint32', 'uint64')) atomic_inc = AtomicOp( 'Inc', ('uint32',))",
"the returned array. size (int or None): If ``int`` type,",
"supported') arg = args[0] if not isinstance(arg.ctype, _cuda_types.CArray): raise TypeError('len()",
"def _get_preamble(self): preamble = '__device__ __forceinline__ unsigned int LaneId() {'",
"``int`` type, the size of static shared memory. If ``None``,",
"two integers the ``y`` and ``z`` attributes are used. Args:",
"+ 'sync' self._dtypes = dtypes doc = f\"\"\"Calls the ``{self._name}``",
"number of iterations to unroll. - If `None` (default), leave",
"is the value for ``array[index]`` to compare with. alt_value: Only",
"ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def call(self, env, *args, **kwds):",
"corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ = doc def __call__(self,",
"mask = mask.obj except Exception: raise TypeError('mask must be an",
"env) code = f'__syncwarp({mask.code})' else: code = '__syncwarp()' return Data(code,",
"is ignored on HIP', RuntimeWarning) mask = None if mask:",
"of type int, ' f'got {type(unroll).__name__}') if unroll is False:",
"= var.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does",
"= var return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self, op,",
"(int): Active threads in a warp. Default is 0xffffffff. ..",
"(int or bool or None): - If `True`, add ``#pragma",
"env, ndim): if not isinstance(ndim, int): raise TypeError('ndim must be",
"only for integer type.') if env.mode == 'numpy': ctype =",
"AtomicOp( 'CAS', ('int32', 'uint32', 'uint64') + (() if runtime.is_hip else",
"raise ValueError('mask is out of range') # val_id refers to",
"'uint64', 'float32')) atomic_min = AtomicOp( 'Min', ('int32', 'uint32', 'uint64')) atomic_max",
"'uint32', 'uint64')) # warp-shuffle functions _shfl_dtypes = ( ('int32', 'uint32',",
"from functools import reduce class RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None):",
"AtomicOp( 'Or', ('int32', 'uint32', 'uint64')) atomic_xor = AtomicOp( 'Xor', ('int32',",
"(unroll is True or 0 < unroll < 1 <<",
"f'got {type(unroll).__name__}') if unroll is False: unroll = 1 if",
"elif ndim == 2: dims = ('x', 'y') elif ndim",
"else: # defined in hip/hcc_detail/device_functions.h preamble += \"\"\" return __lane_id();",
"'sm_70') value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env) value2 = Data.init(value2,",
"'float64') + (() if runtime.is_hip else ('uint64', 'float16'))) shfl_sync =",
"ctype = stop.ctype else: assert False return Range(start, stop, step,",
"2 arguments, got {len(args)}') if kwds: raise TypeError('keyword arguments are",
"= _compile._indexing(array, index, env) ctype = target.ctype if ctype.dtype.name not",
"*, width=None): name = self._name var = Data.init(var, env) ctype",
"WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes)",
"f'range expected at most 3 argument, got {len(args)}') if unroll",
"_Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call(self, env, *, mask=None):",
"env) ctype = var.ctype if ctype.dtype.name not in self._dtypes: raise",
"False: unroll = 1 if not (unroll is True or",
"for the specified operation. For the case of :obj:`atomic_cas`, this",
"atomic_or = AtomicOp( 'Or', ('int32', 'uint32', 'uint64')) atomic_xor = AtomicOp(",
"to index over. index: A valid index such that the",
"{val_id.code}' code += f', {width.code})' return Data(code, ctype) class LaneID(BuiltinFunc):",
"is None code = f'{name}(&{target.code}, {value.code})' return Data(code, ctype) class",
"value expected to be of type int, ' f'got {type(unroll).__name__}')",
"return __lane_id(); } \"\"\" return preamble def call_const(self, env): env.generated.add_code(self._get_preamble())",
"self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}' elif mode ==",
"directive before the loop to disable unrolling. - If an",
"a, b: _compile._call_ufunc( cupy.minimum, (a, b), None, env), args) class",
"unrolling is ignored as the unroll value is ' 'non-positive",
"instead of a property. \"\"\" super().__call__() def _get_preamble(self): preamble =",
"cupyx.jit import _cuda_types from cupyx.jit._internal_types import BuiltinFunc from cupyx.jit._internal_types import",
"https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\" unsigned int ret; asm (\"mov.u32 %0,",
"integer ``n`` means the number of iterations to unroll. -",
"raise TypeError('len() supports only array type') if not arg.ctype.ndim: raise",
"return Range(start, stop, step, ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def",
"} \"\"\" else: # defined in hip/hcc_detail/device_functions.h preamble += \"\"\"",
"'uint32', 'uint64')) atomic_xor = AtomicOp( 'Xor', ('int32', 'uint32', 'uint64')) #",
"else: raise TypeError( f'range expected at most 3 argument, got",
"callable function instead of a property. \"\"\" super().__call__() def _get_preamble(self):",
"mask, var, val_id, *, width=32): super().__call__() def call(self, env, mask,",
"= AtomicOp( 'Dec', ('uint32',)) atomic_cas = AtomicOp( 'CAS', ('int32', 'uint32',",
"step and ' 'unroll value') unroll = unroll.obj if not",
"isinstance(step, Constant): step_is_positive = step.obj >= 0 elif step.ctype.dtype.kind ==",
"compare with. alt_value: Only used in :obj:`atomic_cas` to represent the",
"extern specifier. alignment (int or None): Enforce the alignment via",
"unroll < 1 << 31): warnings.warn( 'loop unrolling is ignored",
"args else: raise TypeError( f'range expected at most 3 argument,",
"{' if not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\"",
"= '__shfl_' + (op + '_' if op else '')",
"\"\"\" super().__call__() def call_const(self, env, dtype, size, alignment=None): name =",
"functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call_const(self, env): return Data('__syncthreads()', _cuda_types.void)",
"stop = Data.init(stop, env) start = Data.init(start, env) step =",
"we will never reach here if (op == 'Add' and",
"unroll = 1 if not (unroll is True or 0",
"at most 3 argument, got {len(args)}') if unroll is not",
"= _compile._astype_scalar(value2, ctype, 'same_kind', env) value2 = Data.init(value2, env) code",
"is out of range') mask = _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind',",
"of iterations to unroll. - If `None` (default), leave the",
"def call_const(self, env, ndim): if not isinstance(ndim, int): raise TypeError('ndim",
"``z`` attributes are used. Args: ndim (int): The dimension of",
"for shfl_xor if self._op in ('up', 'down'): val_id_t = _cuda_types.uint32",
"elts_code = ', '.join(self._code.format(n=n) for n in dims) ctype =",
"{var.code}, {val_id.code}' code += f', {width.code})' return Data(code, ctype) class",
".. _Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\" self.__doc__ = doc",
"= _cuda_types.Scalar(dtype) while env[name] is not None: name = env.get_fresh_variable_name(prefix='_smem')",
"step, unroll)): raise TypeError( 'loop unrolling requires constant start, stop,",
"'uint32', 'uint64') + (() if runtime.is_hip else ('uint16',))) atomic_and =",
"1 << 31): warnings.warn( 'loop unrolling is ignored as the",
"val_id, *, width=None): name = self._name var = Data.init(var, env)",
"isinstance(ndim, int): raise TypeError('ndim must be an integer') # Numba",
"= 'numba.cuda.gridsize' self._code = 'blockDim.{n} * gridDim.{n}' else: raise ValueError('unsupported",
"the address to the corresponding array element ``array[index]`` can be",
"the ``{self._name}`` function to operate atomically on ``array[index]``. Please refer",
"alignment via __align__(N). \"\"\" super().__call__() def call_const(self, env, dtype, size,",
"A :class:`cupy.ndarray` to index over. index: A valid index such",
"op self._dtypes = dtypes doc = f\"\"\"Calls the ``{self._name}`` function",
"detailed explanation. .. _Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ =",
"step = args[0], args[1], Constant(1) elif len(args) == 3: start,",
"TypeError(f'`{name}` does not support {ctype.dtype} input.') try: mask = mask.obj",
"the loop, where the integer ``n`` means the number of",
"self.__doc__ = doc def __call__(self, mask, var, val_id, *, width=32):",
"can be computed. value: Represent the value to use for",
"*, width=32): super().__call__() def call(self, env, mask, var, val_id, *,",
"False return Range(start, stop, step, ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc):",
"'CAS': assert value2 is not None # On HIP, 'H'",
"= f\"\"\" {self._desc} Computation of the first integer is as",
"of 2') else: width = Constant(64) if runtime.is_hip else Constant(32)",
"start, stop, step = args else: raise TypeError( f'range expected",
"in hip/hcc_detail/device_functions.h preamble += \"\"\" return __lane_id(); } \"\"\" return",
"*args, **kwds): if len(args) < 2: raise TypeError( f'max() expects",
"WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes) shfl_xor_sync = WarpShuffleOp('xor', _shfl_dtypes)",
"if width: if isinstance(width, Constant): if width.obj not in (2,",
"Constant(1) elif len(args) == 2: start, stop, step = args[0],",
"``array[index]`` to compare with. alt_value: Only used in :obj:`atomic_cas` to",
"index in the grid.' self._eq = 'jit.threadIdx.x + jit.blockIdx.x *",
"here if ctype.dtype.char == 'H': if runtime.runtimeGetVersion() < 10010: raise",
"the value to swap to. .. seealso:: `Numba's corresponding atomic",
"= WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes) shfl_xor_sync = WarpShuffleOp('xor',",
"if mask is not None: warnings.warn(f'mask {mask} is ignored on",
"alignment=None): \"\"\"Allocates shared memory and returns it as a 1-D",
"from cupyx.jit._internal_types import Data from cupyx.jit._internal_types import Constant from cupyx.jit._internal_types",
"2: start, stop, step = args[0], args[1], Constant(1) elif len(args)",
"such that the address to the corresponding array element ``array[index]``",
"TypeError('range expected at least 1 argument, got 0') elif len(args)",
"Computation of the first integer is as follows:: {self._eq} and",
"super().__call__() def call(self, env, mask, var, val_id, *, width=None): name",
"MaxFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args) < 2:",
"70: raise RuntimeError( 'uint16 atomic operation is not supported before",
"{type(unroll).__name__}') if unroll is False: unroll = 1 if not",
"If ``int`` type, the size of static shared memory. If",
"width: if isinstance(width, Constant): if width.obj not in (2, 4,",
"unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll \"\"\" super().__call__() def call(self, env, *args, unroll=None): if",
"TypeError('mask must be an integer') if runtime.is_hip: warnings.warn(f'mask {mask} is",
"from cupy.cuda import device from cupyx.jit import _cuda_types from cupyx.jit._internal_types",
"{value2.code})' else: assert value2 is None code = f'{name}(&{target.code}, {value.code})'",
"TypeError('range supports only for integer type.') if stop.ctype.dtype.kind not in",
"mask, var, val_id, *, width=None): name = self._name var =",
"in a warp. Default is 0xffffffff. .. seealso:: `Synchronization functions`_",
"super().__call__() def call_const(self, env, dtype, size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem')",
"op else '') + 'sync' self._dtypes = dtypes doc =",
"value is ' 'non-positive or greater than INT_MAX') if isinstance(step,",
"env) width = Data.init(width, env) code = f'{name}({hex(mask)}, {var.code}, {val_id.code}'",
"supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.maximum, (a, b), None,",
"\"\"\"Allocates shared memory and returns it as a 1-D array.",
"self._eq = 'jit.blockDim.x * jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code =",
"least 1 argument, got 0') elif len(args) == 1: start,",
"is as follows:: {self._eq} and for the other two integers",
"range_ = RangeFunc() syncthreads = SyncThreads() syncwarp = SyncWarp() shared_memory",
"single variable, # otherwise a tuple if ndim == 1:",
"'float32', 'float64') + (() if runtime.is_hip else ('uint64', 'float16'))) shfl_sync",
"type') if not arg.ctype.ndim: raise TypeError('len() of unsized array') return",
"name = self._name var = Data.init(var, env) ctype = var.ctype",
"raise TypeError( f'range expected at most 3 argument, got {len(args)}')",
"detailed explanation. Args: array: A :class:`cupy.ndarray` to index over. index:",
"elif step.ctype.dtype.kind == 'u': step_is_positive = True else: step_is_positive =",
"ret; asm (\"mov.u32 %0, %%laneid;\" : \"=r\"(ret) ); return ret;",
"= env.get_fresh_variable_name(prefix='_smem') # retry var = Data(name, _cuda_types.SharedMem(child_type, size, alignment))",
"add ``#pragma unroll(n)`` directive before the loop, where the integer",
"= AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch = AtomicOp( 'Exch', ('int32',",
"TypeError('range supports only for integer type.') if step.ctype.dtype.kind not in",
"('int32', 'uint32')) atomic_exch = AtomicOp( 'Exch', ('int32', 'uint32', 'uint64', 'float32'))",
"for shfl, and # \"laneMask\" for shfl_xor if self._op in",
"= _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env) mask = Data.init(mask, env)",
"threads in a warp. Default is 0xffffffff. .. seealso:: `Synchronization",
"dims = ('x', 'y') elif ndim == 3: dims =",
"+ (() if runtime.is_hip else ('float16',))) atomic_sub = AtomicOp( 'Sub',",
"value2 is None code = f'{name}(&{target.code}, {value.code})' return Data(code, ctype)",
"never reach here if (op == 'Add' and ctype.dtype.char ==",
"as a 1-D array. Args: dtype (dtype): The dtype of",
"name = self._name op = self._op array = Data.init(array, env)",
"\"\"\"Returns the lane ID of the calling thread, ranging in",
"env) value2 = Data.init(value2, env) code = f'{name}(&{target.code}, {value.code}, {value2.code})'",
"are not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.minimum, (a,",
"if mask: if isinstance(mask, Constant): if not (0x0 <= mask.obj",
"be power of 2') else: width = Constant(64) if runtime.is_hip",
"TypeError('len() of unsized array') return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc):",
"that of built-in :obj:`range`. stop (int): Same as that of",
"Same as that of built-in :obj:`range`. step (int): Same as",
"If ``None``, declares the shared memory with extern specifier. alignment",
"iterations to unroll. - If `None` (default), leave the control",
"ignored on HIP', RuntimeWarning) mask = None if mask: if",
"(() if runtime.is_hip else ('uint16',))) atomic_and = AtomicOp( 'And', ('int32',",
"as the unroll value is ' 'non-positive or greater than",
"(a, b), None, env), args) class MaxFunc(BuiltinFunc): def call(self, env,",
"# see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\" unsigned int ret; asm",
"= ( ('int32', 'uint32', 'int64', 'float32', 'float64') + (() if",
"env, array, index, value, value2=None): name = self._name op =",
"'CAS', ('int32', 'uint32', 'uint64') + (() if runtime.is_hip else ('uint16',)))",
"not in self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.')",
"= f'{name}(&{target.code}, {value.code}, {value2.code})' else: assert value2 is None code",
"runtime.is_hip else ('uint16',))) atomic_and = AtomicOp( 'And', ('int32', 'uint32', 'uint64'))",
"before ' 'sm_70') value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env) value2",
"array type.') target = _compile._indexing(array, index, env) ctype = target.ctype",
"to. .. seealso:: `Numba's corresponding atomic functions`_ .. _Atomic Functions:",
"not all(isinstance(x, Constant) for x in (start, stop, step, unroll)):",
"thread index in the grid.' self._eq = 'jit.threadIdx.x + jit.blockIdx.x",
"type.') if step.ctype.dtype.kind not in 'iu': raise TypeError('range supports only",
"(no ``#pragma``). .. seealso:: `#pragma unroll`_ .. _#pragma unroll: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll",
"code = '__syncwarp()' return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self,",
"== 'gridsize': self._desc = 'Compute the grid size.' self._eq =",
"== 0: raise TypeError('range expected at least 1 argument, got",
"If `None` (default), leave the control of loop unrolling to",
"'uint32', 'uint64', 'float32')) atomic_min = AtomicOp( 'Min', ('int32', 'uint32', 'uint64'))",
"\"\"\" super().__call__() def call(self, env, *args, unroll=None): if len(args) ==",
"stop.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for integer",
"raise TypeError( 'loop unrolling requires constant start, stop, step and",
"(isinstance(unroll, int) or isinstance(unroll, bool)): raise TypeError( 'unroll value expected",
"None: name = env.get_fresh_variable_name(prefix='_smem') # retry var = Data(name, _cuda_types.SharedMem(child_type,",
"``[0, jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`, this is a callable",
"return reduce(lambda a, b: _compile._call_ufunc( cupy.maximum, (a, b), None, env),",
"= 'numba.cuda.grid' self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}' elif",
"f'max() expects at least 2 arguments, got {len(args)}') if kwds:",
"_get_preamble(self): preamble = '__device__ __forceinline__ unsigned int LaneId() {' if",
"+= \"\"\" unsigned int ret; asm (\"mov.u32 %0, %%laneid;\" :",
"if isinstance(width, Constant): if width.obj not in (2, 4, 8,",
"0: raise TypeError('range expected at least 1 argument, got 0')",
"f\"\"\"Calls the ``{self._name}`` function to operate atomically on ``array[index]``. Please",
"atomically on ``array[index]``. Please refer to `Atomic Functions`_ for detailed",
"Data.init(value, env) if op == 'CAS': assert value2 is not",
"'down'): val_id_t = _cuda_types.uint32 else: val_id_t = _cuda_types.int32 val_id =",
"- If `True`, add ``#pragma unroll`` directive before the loop.",
"= 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x' self._link = 'numba.cuda.grid' self._code",
"f\"\"\" {self._desc} Computation of the first integer is as follows::",
"_Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's corresponding atomic functions: https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations \"\"\"",
"Constant(32) width = _compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env) width =",
"_cuda_types.Scalar(int) elif env.mode == 'cuda': ctype = stop.ctype else: assert",
"%%laneid;\" : \"=r\"(ret) ); return ret; } \"\"\" else: #",
"} range_ = RangeFunc() syncthreads = SyncThreads() syncwarp = SyncWarp()",
"mask.obj <= 0xffffffff): raise ValueError('mask is out of range') mask",
"alignment (int or None): Enforce the alignment via __align__(N). \"\"\"",
"+ jit.blockIdx.x * jit.blockDim.x' self._link = 'numba.cuda.grid' self._code = 'threadIdx.{n}",
"self.__doc__ = doc def __call__(self, array, index, value, alt_value=None): super().__call__()",
"SyncWarp() shared_memory = SharedMemory() grid = GridFunc('grid') gridsize = GridFunc('gridsize')",
"thread, ranging in ``[0, jit.warpsize)``. .. note:: Unlike :obj:`numba.cuda.laneid`, this",
"not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.minimum, (a, b),",
".. _Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ = doc def",
"= LaneID() # atomic functions atomic_add = AtomicOp( 'Add', ('int32',",
"env) if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first argument",
"and ctype.dtype.char == 'e' and runtime.runtimeGetVersion() < 10000): raise RuntimeError(",
"code += f', {width.code})' return Data(code, ctype) class LaneID(BuiltinFunc): def",
"= True else: step_is_positive = None stop = Data.init(stop, env)",
"Constant) for x in (start, stop, step, unroll)): raise TypeError(",
"args) class SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls ``__syncthreads()``. .. seealso:: `Synchronization",
"TypeError( f'max() expects at least 2 arguments, got {len(args)}') if",
"the control of loop unrolling to the compiler (no ``#pragma``).",
".. seealso:: `Numba's corresponding atomic functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions",
"'__device__ __forceinline__ unsigned int LaneId() {' if not runtime.is_hip: #",
"not supported before ' 'sm_70') value2 = _compile._astype_scalar(value2, ctype, 'same_kind',",
"\"\"\"Calls ``__syncwarp()``. Args: mask (int): Active threads in a warp.",
"grid size.' self._eq = 'jit.blockDim.x * jit.gridDim.x' self._link = 'numba.cuda.gridsize'",
"'float32')) atomic_min = AtomicOp( 'Min', ('int32', 'uint32', 'uint64')) atomic_max =",
"env.locals[name] = var return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def __init__(self,",
"_shfl_dtypes = ( ('int32', 'uint32', 'int64', 'float32', 'float64') + (()",
"'numba.cuda.gridsize' self._code = 'blockDim.{n} * gridDim.{n}' else: raise ValueError('unsupported function')",
"int LaneId() {' if not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble",
"unrolling support. Args: start (int): Same as that of built-in",
"only array type') if not arg.ctype.ndim: raise TypeError('len() of unsized",
"else: assert False return Range(start, stop, step, ctype, step_is_positive, unroll=unroll)",
"unroll`` directive before the loop. - If `False`, add ``#pragma",
"def __init__(self, op, dtypes): self._op = op self._name = 'atomic'",
"= f'{name}({hex(mask)}, {var.code}, {val_id.code}' code += f', {width.code})' return Data(code,",
"_compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env) mask = Data.init(mask, env) code",
"('int32', 'uint32', 'int64', 'float32', 'float64') + (() if runtime.is_hip else",
"returned, otherwise a tuple. .. note:: This function follows the",
"case of :obj:`atomic_cas`, this is the value for ``array[index]`` to",
"super().__call__() def call(self, env, array, index, value, value2=None): name =",
"size, alignment)) env.decls[name] = var env.locals[name] = var return Data(name,",
"first argument must be of array type.') target = _compile._indexing(array,",
"On HIP, 'H' is not supported and we will never",
"array. Args: dtype (dtype): The dtype of the returned array.",
"not in (2, 4, 8, 16, 32): raise ValueError('width needs",
"tuple if ndim == 1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim",
"('int32', 'uint32', 'uint64')) atomic_xor = AtomicOp( 'Xor', ('int32', 'uint32', 'uint64'))",
"range') # val_id refers to \"delta\" for shfl_{up, down}, \"srcLane\"",
"cupyx.jit._internal_types import Data from cupyx.jit._internal_types import Constant from cupyx.jit._internal_types import",
"alt_value: Only used in :obj:`atomic_cas` to represent the value to",
"stop, step, ctype, step_is_positive, unroll=unroll) class LenFunc(BuiltinFunc): def call(self, env,",
"supported before ' 'sm_70') value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env)",
"0xffffffff): raise ValueError('mask is out of range') mask = _compile._astype_scalar(",
"ndim=1,2,3 are supported') elts_code = ', '.join(self._code.format(n=n) for n in",
"not isinstance(ndim, int): raise TypeError('ndim must be an integer') #",
"('uint16',))) atomic_and = AtomicOp( 'And', ('int32', 'uint32', 'uint64')) atomic_or =",
"3 argument, got {len(args)}') if unroll is not None: if",
"operate atomically on ``array[index]``. Please refer to `Atomic Functions`_ for",
"None, env), args) class MaxFunc(BuiltinFunc): def call(self, env, *args, **kwds):",
"== 'grid': self._desc = 'Compute the thread index in the",
"'Min', ('int32', 'uint32', 'uint64')) atomic_max = AtomicOp( 'Max', ('int32', 'uint32',",
"of a property. \"\"\" super().__call__() def _get_preamble(self): preamble = '__device__",
"\"\"\" super().__call__() def _get_preamble(self): preamble = '__device__ __forceinline__ unsigned int",
"as that of built-in :obj:`range`. stop (int): Same as that",
"__init__(self, op, dtypes): self._op = op self._name = 'atomic' +",
"step_is_positive = True else: step_is_positive = None stop = Data.init(stop,",
"SyncThreads(BuiltinFunc): def __call__(self): \"\"\"Calls ``__syncthreads()``. .. seealso:: `Synchronization functions`_ ..",
"= _cuda_types.uint32 else: val_id_t = _cuda_types.int32 val_id = _compile._astype_scalar(val_id, val_id_t,",
"# \"laneMask\" for shfl_xor if self._op in ('up', 'down'): val_id_t",
"for x in (start, stop, step, unroll)): raise TypeError( 'loop",
"{len(args)}') if kwds: raise TypeError('keyword arguments are not supported') return",
"\"\"\" return __lane_id(); } \"\"\" return preamble def call_const(self, env):",
"dtypes): self._op = op self._name = '__shfl_' + (op +",
"= 'Compute the grid size.' self._eq = 'jit.blockDim.x * jit.gridDim.x'",
"the lane ID of the calling thread, ranging in ``[0,",
"if not runtime.is_hip: # see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419 preamble += \"\"\" unsigned",
"(0x0 <= mask.obj <= 0xffffffff): raise ValueError('mask is out of",
"Args: array: A :class:`cupy.ndarray` to index over. index: A valid",
"super().__call__() def call_const(self, env): return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def",
"and returns it as a 1-D array. Args: dtype (dtype):",
"*args, unroll=None): \"\"\"Range with loop unrolling support. Args: start (int):",
"function') doc = f\"\"\" {self._desc} Computation of the first integer",
"8, 16, 32): raise ValueError('width needs to be power of",
"to the corresponding array element ``array[index]`` can be computed. value:",
"None: if not all(isinstance(x, Constant) for x in (start, stop,",
"Exception: raise TypeError('mask must be an integer') if runtime.is_hip: warnings.warn(f'mask",
"If an `int`, add ``#pragma unroll(n)`` directive before the loop,",
"dimension of the grid. Only 1, 2, or 3 is",
"_compile from functools import reduce class RangeFunc(BuiltinFunc): def __call__(self, *args,",
"self._name = 'atomic' + op self._dtypes = dtypes doc =",
"cupy.cuda import device from cupyx.jit import _cuda_types from cupyx.jit._internal_types import",
"def call_const(self, env, dtype, size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type",
"control of loop unrolling to the compiler (no ``#pragma``). ..",
"value, value2=None): name = self._name op = self._op array =",
"be of array type.') target = _compile._indexing(array, index, env) ctype",
"`Warp Shuffle Functions`_ for detailed explanation. .. _Warp Shuffle Functions:",
"a 1-D array. Args: dtype (dtype): The dtype of the",
"while env[name] is not None: name = env.get_fresh_variable_name(prefix='_smem') # retry",
"step.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for integer",
"functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call(self, env,",
"for integer type.') if step.ctype.dtype.kind not in 'iu': raise TypeError('range",
"env.mode == 'cuda': ctype = stop.ctype else: assert False return",
"- If an `int`, add ``#pragma unroll(n)`` directive before the",
"= None if mask: if isinstance(mask, Constant): if not (0x0",
"not supported and we will never reach here if ctype.dtype.char",
"type.') if stop.ctype.dtype.kind not in 'iu': raise TypeError('range supports only",
"AtomicOp( 'Min', ('int32', 'uint32', 'uint64')) atomic_max = AtomicOp( 'Max', ('int32',",
"unsigned int ret; asm (\"mov.u32 %0, %%laneid;\" : \"=r\"(ret) );",
"_cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes):",
"unroll=None): if len(args) == 0: raise TypeError('range expected at least",
"add ``#pragma unroll(1)`` directive before the loop to disable unrolling.",
"env, *args, **kwds): if len(args) != 1: raise TypeError(f'len() expects",
"Data.init(array, env) if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)): raise TypeError('The first",
"Active threads in a warp. Default is 0xffffffff. .. seealso::",
"try: mask = mask.obj except Exception: raise TypeError('mask must be",
"is out of range') # val_id refers to \"delta\" for",
"we will never reach here if ctype.dtype.char == 'H': if",
"not in 'iu': raise TypeError('range supports only for integer type.')",
"1: return Data(self._code.format(n='x'), _cuda_types.uint32) elif ndim == 2: dims =",
"that of built-in :obj:`range`. step (int): Same as that of",
"function. Please refer to `Warp Shuffle Functions`_ for detailed explanation.",
"to operate atomically on ``array[index]``. Please refer to `Atomic Functions`_",
"= args[0], args[1], Constant(1) elif len(args) == 3: start, stop,",
"value = Data.init(value, env) if op == 'CAS': assert value2",
"len(args) != 1: raise TypeError(f'len() expects only 1 argument, got",
"__call__(self, dtype, size, alignment=None): \"\"\"Allocates shared memory and returns it",
"this is the value for ``array[index]`` to compare with. alt_value:",
"'uint64')) atomic_or = AtomicOp( 'Or', ('int32', 'uint32', 'uint64')) atomic_xor =",
"= '__syncwarp()' return Data(code, _cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self, dtype,",
"- If `None` (default), leave the control of loop unrolling",
"int ret; asm (\"mov.u32 %0, %%laneid;\" : \"=r\"(ret) ); return",
"= dtypes doc = f\"\"\"Calls the ``{self._name}`` function to operate",
"value2=None): name = self._name op = self._op array = Data.init(array,",
"`Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__() def call_const(self,",
"if step.ctype.dtype.kind not in 'iu': raise TypeError('range supports only for",
"= Data.init(start, env) step = Data.init(step, env) if start.ctype.dtype.kind not",
"'uint16 atomic operation is not supported before ' 'sm_70') value2",
"= doc def __call__(self, array, index, value, alt_value=None): super().__call__() def",
"TypeError( 'loop unrolling requires constant start, stop, step and '",
"1: start, stop, step = Constant(0), args[0], Constant(1) elif len(args)",
"from cupyx.jit._internal_types import Range from cupyx.jit import _compile from functools",
"stop, step = Constant(0), args[0], Constant(1) elif len(args) == 2:",
"are not supported') arg = args[0] if not isinstance(arg.ctype, _cuda_types.CArray):",
"before the loop to disable unrolling. - If an `int`,",
"('uint32',)) atomic_cas = AtomicOp( 'CAS', ('int32', 'uint32', 'uint64') + (()",
"_cuda_types.void) class SharedMemory(BuiltinFunc): def __call__(self, dtype, size, alignment=None): \"\"\"Allocates shared",
"int): raise TypeError('ndim must be an integer') # Numba convention:",
"in self._dtypes: raise TypeError(f'`{name}` does not support {ctype.dtype} input.') #",
"('uint64', 'float16'))) shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes)",
"return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff): \"\"\"Calls",
"== 2: start, stop, step = args[0], args[1], Constant(1) elif",
"type.') target = _compile._indexing(array, index, env) ctype = target.ctype if",
"an integer') # Numba convention: for 1D we return a",
"if runtime.is_hip: if mask is not None: warnings.warn(f'mask {mask} is",
"mask = Data.init(mask, env) code = f'__syncwarp({mask.code})' else: code =",
"= self._name op = self._op array = Data.init(array, env) if",
"HIP', RuntimeWarning) elif not (0x0 <= mask <= 0xffffffff): raise",
"to unroll. - If `None` (default), leave the control of",
"< 70: raise RuntimeError( 'uint16 atomic operation is not supported",
"MinFunc(), max: MaxFunc(), } range_ = RangeFunc() syncthreads = SyncThreads()",
"env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while env[name] is not None: name",
"call(self, env, *args, unroll=None): if len(args) == 0: raise TypeError('range",
"= 'jit.blockDim.x * jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code = 'blockDim.{n}",
"# retry var = Data(name, _cuda_types.SharedMem(child_type, size, alignment)) env.decls[name] =",
"self._desc = 'Compute the thread index in the grid.' self._eq",
"class RangeFunc(BuiltinFunc): def __call__(self, *args, unroll=None): \"\"\"Range with loop unrolling",
"alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while env[name] is",
"expected at least 1 argument, got 0') elif len(args) ==",
"Shuffle Functions`_ for detailed explanation. .. _Warp Shuffle Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions",
"is True or 0 < unroll < 1 << 31):",
"\"\"\"Calls ``__syncthreads()``. .. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions",
"Constant from cupyx.jit._internal_types import Range from cupyx.jit import _compile from",
"args) class MaxFunc(BuiltinFunc): def call(self, env, *args, **kwds): if len(args)",
"unroll is False: unroll = 1 if not (unroll is",
"('float16',))) atomic_sub = AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch = AtomicOp(",
"type.') if env.mode == 'numpy': ctype = _cuda_types.Scalar(int) elif env.mode",
"== 'CAS': assert value2 is not None # On HIP,",
"RuntimeWarning) mask = None if mask: if isinstance(mask, Constant): if",
"True or 0 < unroll < 1 << 31): warnings.warn(",
"return ret; } \"\"\" else: # defined in hip/hcc_detail/device_functions.h preamble",
"or isinstance(unroll, bool)): raise TypeError( 'unroll value expected to be",
"= AtomicOp( 'CAS', ('int32', 'uint32', 'uint64') + (() if runtime.is_hip",
"None): Enforce the alignment via __align__(N). \"\"\" super().__call__() def call_const(self,",
"{ctype.dtype} input.') # On HIP, 'e' is not supported and",
"env), args) class MaxFunc(BuiltinFunc): def call(self, env, *args, **kwds): if",
"jit.blockDim.x' self._link = 'numba.cuda.grid' self._code = 'threadIdx.{n} + blockIdx.{n} *",
"__lane_id(); } \"\"\" return preamble def call_const(self, env): env.generated.add_code(self._get_preamble()) return",
"ndim): super().__call__() def call_const(self, env, ndim): if not isinstance(ndim, int):",
"var.ctype if ctype.dtype.name not in self._dtypes: raise TypeError(f'`{name}` does not",
"+ (() if runtime.is_hip else ('uint64', 'float16'))) shfl_sync = WarpShuffleOp('',",
"supported') elts_code = ', '.join(self._code.format(n=n) for n in dims) ctype",
"Numba convention: for 1D we return a single variable, #",
"note:: Unlike :obj:`numba.cuda.laneid`, this is a callable function instead of",
"= f\"\"\"Calls the ``{self._name}`` function. Please refer to `Warp Shuffle",
"(2, 4, 8, 16, 32): raise ValueError('width needs to be",
"class AtomicOp(BuiltinFunc): def __init__(self, op, dtypes): self._op = op self._name",
"value2 is not None # On HIP, 'H' is not",
"return Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self, env, *args,",
"atomic_dec = AtomicOp( 'Dec', ('uint32',)) atomic_cas = AtomicOp( 'CAS', ('int32',",
"if runtime.is_hip else ('uint16',))) atomic_and = AtomicOp( 'And', ('int32', 'uint32',",
"ctype.dtype.char == 'H': if runtime.runtimeGetVersion() < 10010: raise RuntimeError( 'uint16",
"(int): Same as that of built-in :obj:`range`. unroll (int or",
"'uint32', 'uint64')) atomic_or = AtomicOp( 'Or', ('int32', 'uint32', 'uint64')) atomic_xor",
"__call__(self, mask, var, val_id, *, width=32): super().__call__() def call(self, env,",
"op self._name = 'atomic' + op self._dtypes = dtypes doc",
"only 1 argument, got {len(args)}') if kwds: raise TypeError('keyword arguments",
"class LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns the lane ID of the",
"dtype (dtype): The dtype of the returned array. size (int",
"var env.locals[name] = var return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc): def",
"* jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code = 'blockDim.{n} * gridDim.{n}'",
"self._dtypes = dtypes doc = f\"\"\"Calls the ``{self._name}`` function. Please",
"warp. Default is 0xffffffff. .. seealso:: `Synchronization functions`_ .. _Synchronization",
"'H' is not supported and we will never reach here",
"call_const(self, env): return Data('__syncthreads()', _cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self, *,",
"is False: unroll = 1 if not (unroll is True",
"= mask.obj except Exception: raise TypeError('mask must be an integer')",
"raise TypeError('range supports only for integer type.') if env.mode ==",
"'unroll value') unroll = unroll.obj if not (isinstance(unroll, int) or",
"atomic_exch = AtomicOp( 'Exch', ('int32', 'uint32', 'uint64', 'float32')) atomic_min =",
"AtomicOp( 'Inc', ('uint32',)) atomic_dec = AtomicOp( 'Dec', ('uint32',)) atomic_cas =",
"if ctype.dtype.char == 'H': if runtime.runtimeGetVersion() < 10010: raise RuntimeError(",
"value to swap to. .. seealso:: `Numba's corresponding atomic functions`_",
"'grid': self._desc = 'Compute the thread index in the grid.'",
"atomic_sub = AtomicOp( 'Sub', ('int32', 'uint32')) atomic_exch = AtomicOp( 'Exch',",
"mask (int): Active threads in a warp. Default is 0xffffffff.",
"{self._eq} and for the other two integers the ``y`` and",
"(int): Same as that of built-in :obj:`range`. step (int): Same",
"= Data.init(var, env) ctype = var.ctype if ctype.dtype.name not in",
"loop, where the integer ``n`` means the number of iterations",
"directive before the loop, where the integer ``n`` means the",
"`Numba's corresponding atomic functions`_ .. _Atomic Functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions .. _Numba's",
"is not None # On HIP, 'H' is not supported",
"TypeError('The first argument must be of array type.') target =",
"to represent the value to swap to. .. seealso:: `Numba's",
"*args, unroll=None): if len(args) == 0: raise TypeError('range expected at",
"raise TypeError('ndim must be an integer') # Numba convention: for",
"for n in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype)",
"expected to be of type int, ' f'got {type(unroll).__name__}') if",
"TypeError('keyword arguments are not supported') arg = args[0] if not",
"supported before ' 'CUDA 10.1') if int(device.get_compute_capability()) < 70: raise",
"expected at most 3 argument, got {len(args)}') if unroll is",
"if isinstance(mask, Constant): if not (0x0 <= mask.obj <= 0xffffffff):",
"dtype of the returned array. size (int or None): If",
"== 3: dims = ('x', 'y', 'z') else: raise ValueError('Only",
"of the returned array. size (int or None): If ``int``",
"a single variable, # otherwise a tuple if ndim ==",
"*, mask=None): if runtime.is_hip: if mask is not None: warnings.warn(f'mask",
"int(device.get_compute_capability()) < 70: raise RuntimeError( 'uint16 atomic operation is not",
"Data(f'static_cast<long long>({arg.code}.shape()[0])', _cuda_types.Scalar('q')) class MinFunc(BuiltinFunc): def call(self, env, *args, **kwds):",
"import runtime from cupy.cuda import device from cupyx.jit import _cuda_types",
"# Numba convention: for 1D we return a single variable,",
"here if (op == 'Add' and ctype.dtype.char == 'e' and",
"TypeError( f'range expected at most 3 argument, got {len(args)}') if",
"\"\"\" self.__doc__ = doc def __call__(self, array, index, value, alt_value=None):",
"n in dims) ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim) return Data(f'thrust::make_tuple({elts_code})', ctype) class",
"mask = _compile._astype_scalar( mask, _cuda_types.int32, 'same_kind', env) mask = Data.init(mask,",
"width=None): name = self._name var = Data.init(var, env) ctype =",
"'uint32')) atomic_exch = AtomicOp( 'Exch', ('int32', 'uint32', 'uint64', 'float32')) atomic_min",
"def __call__(self, ndim): super().__call__() def call_const(self, env, ndim): if not",
"size, alignment=None): name = env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while env[name]",
"import Data from cupyx.jit._internal_types import Constant from cupyx.jit._internal_types import Range",
"Range from cupyx.jit import _compile from functools import reduce class",
"built-in :obj:`range`. step (int): Same as that of built-in :obj:`range`.",
"len(args) == 3: start, stop, step = args else: raise",
"TypeError('ndim must be an integer') # Numba convention: for 1D",
"raise TypeError('keyword arguments are not supported') return reduce(lambda a, b:",
"Same as that of built-in :obj:`range`. stop (int): Same as",
"< 10010: raise RuntimeError( 'uint16 atomic operation is not supported",
"('int32', 'uint32', 'uint64', 'float32', 'float64') + (() if runtime.is_hip else",
"got 0') elif len(args) == 1: start, stop, step =",
"of the grid. Only 1, 2, or 3 is allowed.",
"TypeError(f'`{name}` does not support {ctype.dtype} input.') # On HIP, 'e'",
"preamble += \"\"\" unsigned int ret; asm (\"mov.u32 %0, %%laneid;\"",
"x in (start, stop, step, unroll)): raise TypeError( 'loop unrolling",
"= env.get_fresh_variable_name(prefix='_smem') child_type = _cuda_types.Scalar(dtype) while env[name] is not None:",
"convention of Numba's :func:`{self._link}`. \"\"\" self.__doc__ = doc def __call__(self,",
"before ' 'CUDA 10.1') if int(device.get_compute_capability()) < 70: raise RuntimeError(",
"memory and returns it as a 1-D array. Args: dtype",
"3: dims = ('x', 'y', 'z') else: raise ValueError('Only ndim=1,2,3",
"env, *args, **kwds): if len(args) < 2: raise TypeError( f'max()",
"Data(code, ctype) class LaneID(BuiltinFunc): def __call__(self): \"\"\"Returns the lane ID",
"env): env.generated.add_code(self._get_preamble()) return Data('LaneId()', _cuda_types.uint32) builtin_functions_dict = { range: RangeFunc(),",
"* gridDim.{n}' else: raise ValueError('unsupported function') doc = f\"\"\" {self._desc}",
"'atomic' + op self._dtypes = dtypes doc = f\"\"\"Calls the",
"otherwise a tuple. .. note:: This function follows the convention",
"does not support {ctype.dtype} input.') # On HIP, 'e' is",
"stop, step = args[0], args[1], Constant(1) elif len(args) == 3:",
"mask = None if mask: if isinstance(mask, Constant): if not",
"= doc def __call__(self, ndim): super().__call__() def call_const(self, env, ndim):",
"element ``array[index]`` can be computed. value: Represent the value to",
"('x', 'y') elif ndim == 3: dims = ('x', 'y',",
"'Or', ('int32', 'uint32', 'uint64')) atomic_xor = AtomicOp( 'Xor', ('int32', 'uint32',",
"code = f'__syncwarp({mask.code})' else: code = '__syncwarp()' return Data(code, _cuda_types.void)",
"ndim): if not isinstance(ndim, int): raise TypeError('ndim must be an",
"index such that the address to the corresponding array element",
"argument must be of array type.') target = _compile._indexing(array, index,",
"'uint64')) atomic_max = AtomicOp( 'Max', ('int32', 'uint32', 'uint64')) atomic_inc =",
"= Data.init(width, env) code = f'{name}({hex(mask)}, {var.code}, {val_id.code}' code +=",
"for the other two integers the ``y`` and ``z`` attributes",
"is ignored on HIP', RuntimeWarning) elif not (0x0 <= mask",
"if not all(isinstance(x, Constant) for x in (start, stop, step,",
"cupy from cupy_backends.cuda.api import runtime from cupy.cuda import device from",
"= WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync = WarpShuffleOp('down',",
"'iu': raise TypeError('range supports only for integer type.') if env.mode",
"size.' self._eq = 'jit.blockDim.x * jit.gridDim.x' self._link = 'numba.cuda.gridsize' self._code",
"); return ret; } \"\"\" else: # defined in hip/hcc_detail/device_functions.h",
"= _compile._astype_scalar( width, _cuda_types.int32, 'same_kind', env) width = Data.init(width, env)",
"super().__call__() def _get_preamble(self): preamble = '__device__ __forceinline__ unsigned int LaneId()",
"args[0], args[1], Constant(1) elif len(args) == 3: start, stop, step",
"The dtype of the returned array. size (int or None):",
"dims = ('x', 'y', 'z') else: raise ValueError('Only ndim=1,2,3 are",
"args[0], Constant(1) elif len(args) == 2: start, stop, step =",
"super().__call__() def call_const(self, env, ndim): if not isinstance(ndim, int): raise",
"to `Atomic Functions`_ for detailed explanation. Args: array: A :class:`cupy.ndarray`",
"'iu': raise TypeError('range supports only for integer type.') if step.ctype.dtype.kind",
"step.obj >= 0 elif step.ctype.dtype.kind == 'u': step_is_positive = True",
"- If `False`, add ``#pragma unroll(1)`` directive before the loop",
"argument, got {len(args)}') if unroll is not None: if not",
"of the calling thread, ranging in ``[0, jit.warpsize)``. .. note::",
"atomic operation is not supported before CUDA 10.0.') value =",
"``#pragma unroll`` directive before the loop. - If `False`, add",
"__call__(self, *args, unroll=None): \"\"\"Range with loop unrolling support. Args: start",
"*args, **kwds): if len(args) < 2: raise TypeError( f'min() expects",
"{self._desc} Computation of the first integer is as follows:: {self._eq}",
"'uint64', 'float32', 'float64') + (() if runtime.is_hip else ('float16',))) atomic_sub",
"ID of the calling thread, ranging in ``[0, jit.warpsize)``. ..",
"supports only for integer type.') if step.ctype.dtype.kind not in 'iu':",
"var = Data.init(var, env) ctype = var.ctype if ctype.dtype.name not",
"3 is allowed. Returns: int or tuple: If ``ndim`` is",
"Only used in :obj:`atomic_cas` to represent the value to swap",
"_cuda_types.void) class SyncWarp(BuiltinFunc): def __call__(self, *, mask=0xffffffff): \"\"\"Calls ``__syncwarp()``. Args:",
"doc = f\"\"\"Calls the ``{self._name}`` function to operate atomically on",
"are supported') elts_code = ', '.join(self._code.format(n=n) for n in dims)",
"needs to be power of 2') else: width = Constant(64)",
"= op self._name = 'atomic' + op self._dtypes = dtypes",
"of array type.') target = _compile._indexing(array, index, env) ctype =",
"value to use for the specified operation. For the case",
"shfl_{up, down}, \"srcLane\" for shfl, and # \"laneMask\" for shfl_xor",
"supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.minimum, (a, b), None,",
"arguments are not supported') return reduce(lambda a, b: _compile._call_ufunc( cupy.maximum,",
"defined in hip/hcc_detail/device_functions.h preamble += \"\"\" return __lane_id(); } \"\"\"",
"shfl_sync = WarpShuffleOp('', _shfl_dtypes) shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes) shfl_down_sync =",
"Data.init(stop, env) start = Data.init(start, env) step = Data.init(step, env)",
"kwds: raise TypeError('keyword arguments are not supported') arg = args[0]",
"stop.ctype else: assert False return Range(start, stop, step, ctype, step_is_positive,",
"else: raise ValueError('unsupported function') doc = f\"\"\" {self._desc} Computation of",
"is returned, otherwise a tuple. .. note:: This function follows",
"return Data(f'thrust::make_tuple({elts_code})', ctype) class WarpShuffleOp(BuiltinFunc): def __init__(self, op, dtypes): self._op",
".. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions \"\"\" super().__call__()",
"`False`, add ``#pragma unroll(1)`` directive before the loop to disable",
"len(args) == 2: start, stop, step = args[0], args[1], Constant(1)",
"if unroll is not None: if not all(isinstance(x, Constant) for",
"reach here if (op == 'Add' and ctype.dtype.char == 'e'",
"https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions \"\"\" self.__doc__ = doc def __call__(self, mask, var, val_id,",
"'uint32', 'uint64', 'float32', 'float64') + (() if runtime.is_hip else ('float16',)))",
"not None: if not all(isinstance(x, Constant) for x in (start,",
"self._op array = Data.init(array, env) if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)):",
"constant start, stop, step and ' 'unroll value') unroll =",
"env.mode == 'numpy': ctype = _cuda_types.Scalar(int) elif env.mode == 'cuda':",
"Numba's :func:`{self._link}`. \"\"\" self.__doc__ = doc def __call__(self, ndim): super().__call__()",
"is 0xffffffff. .. seealso:: `Synchronization functions`_ .. _Synchronization functions: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions",
"= 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}' elif mode == 'gridsize':",
"env) val_id = Data.init(val_id, env) if width: if isinstance(width, Constant):",
"__call__(self): \"\"\"Returns the lane ID of the calling thread, ranging",
"width=32): super().__call__() def call(self, env, mask, var, val_id, *, width=None):",
"doc = f\"\"\"Calls the ``{self._name}`` function. Please refer to `Warp",
"if int(device.get_compute_capability()) < 70: raise RuntimeError( 'uint16 atomic operation is",
"1: raise TypeError(f'len() expects only 1 argument, got {len(args)}') if",
"``#pragma unroll(1)`` directive before the loop to disable unrolling. -",
"tuple: If ``ndim`` is 1, an integer is returned, otherwise",
"``__syncwarp()``. Args: mask (int): Active threads in a warp. Default",
"the ``y`` and ``z`` attributes are used. Args: ndim (int):",
"If `True`, add ``#pragma unroll`` directive before the loop. -",
"mode == 'gridsize': self._desc = 'Compute the grid size.' self._eq",
"\"\"\" self.__doc__ = doc def __call__(self, ndim): super().__call__() def call_const(self,",
"preamble = '__device__ __forceinline__ unsigned int LaneId() {' if not",
"= var env.locals[name] = var return Data(name, _cuda_types.Ptr(child_type)) class AtomicOp(BuiltinFunc):",
"integer') # Numba convention: for 1D we return a single",
"with. alt_value: Only used in :obj:`atomic_cas` to represent the value"
] |
[
"去除两端的空格 print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格 print(str1.lstrip()) print(len(str1.lstrip())) # 去除右侧的空格 print(str1.rstrip())",
"\" hello \" print(str1) print(len(str1)) # 去除两端的空格 print(str1.strip()) print(len(str1.strip())) #",
"= \" hello \" print(str1) print(len(str1)) # 去除两端的空格 print(str1.strip()) print(len(str1.strip()))",
"hello \" print(str1) print(len(str1)) # 去除两端的空格 print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格",
"print(str1) print(len(str1)) # 去除两端的空格 print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格 print(str1.lstrip()) print(len(str1.lstrip()))",
"# 字符串删除空白 str1 = \" hello \" print(str1) print(len(str1)) #",
"str1 = \" hello \" print(str1) print(len(str1)) # 去除两端的空格 print(str1.strip())",
"print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格 print(str1.lstrip()) print(len(str1.lstrip())) # 去除右侧的空格 print(str1.rstrip()) print(len(str1.rstrip()))",
"print(len(str1)) # 去除两端的空格 print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格 print(str1.lstrip()) print(len(str1.lstrip())) #",
"\" print(str1) print(len(str1)) # 去除两端的空格 print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格 print(str1.lstrip())",
"# 去除两端的空格 print(str1.strip()) print(len(str1.strip())) # 去除左侧的空格 print(str1.lstrip()) print(len(str1.lstrip())) # 去除右侧的空格",
"字符串删除空白 str1 = \" hello \" print(str1) print(len(str1)) # 去除两端的空格"
] |
[
"from .util import deprecated from .util import apply_along_axis from .util",
".util import moving_average from .util import moving_avg_conv from .util import",
"-*- coding: utf-8 -*- from .util import rms from .util",
"moving_avg_conv from .util import moving_avg_fft from .util import normalize from",
"import normalize from .util import next_pow2 from .util import top_and_tail",
"next_pow2 from .util import top_and_tail from .util import extrapolate from",
"from .util import moving_average from .util import moving_avg_conv from .util",
".util import normalize from .util import next_pow2 from .util import",
"import next_pow2 from .util import top_and_tail from .util import extrapolate",
"-*- from .util import rms from .util import moving_average from",
".util import rms from .util import moving_average from .util import",
"extrapolate from .util import nearest from .util import deprecated from",
"moving_average from .util import moving_avg_conv from .util import moving_avg_fft from",
"<filename>bruges/util/__init__.py<gh_stars>0 # -*- coding: utf-8 -*- from .util import rms",
"coding: utf-8 -*- from .util import rms from .util import",
".util import moving_avg_fft from .util import normalize from .util import",
".util import extrapolate from .util import nearest from .util import",
"import deprecated from .util import apply_along_axis from .util import sigmoid",
".util import moving_avg_conv from .util import moving_avg_fft from .util import",
".util import nearest from .util import deprecated from .util import",
"moving_avg_fft from .util import normalize from .util import next_pow2 from",
"top_and_tail from .util import extrapolate from .util import nearest from",
"from .util import next_pow2 from .util import top_and_tail from .util",
"from .util import moving_avg_fft from .util import normalize from .util",
"import rms from .util import moving_average from .util import moving_avg_conv",
"import top_and_tail from .util import extrapolate from .util import nearest",
"# -*- coding: utf-8 -*- from .util import rms from",
"from .util import extrapolate from .util import nearest from .util",
"import nearest from .util import deprecated from .util import apply_along_axis",
".util import deprecated from .util import apply_along_axis from .util import",
"nearest from .util import deprecated from .util import apply_along_axis from",
"from .util import nearest from .util import deprecated from .util",
"rms from .util import moving_average from .util import moving_avg_conv from",
".util import top_and_tail from .util import extrapolate from .util import",
"import moving_avg_fft from .util import normalize from .util import next_pow2",
"utf-8 -*- from .util import rms from .util import moving_average",
"from .util import rms from .util import moving_average from .util",
"from .util import top_and_tail from .util import extrapolate from .util",
"from .util import normalize from .util import next_pow2 from .util",
"from .util import moving_avg_conv from .util import moving_avg_fft from .util",
"normalize from .util import next_pow2 from .util import top_and_tail from",
"import moving_average from .util import moving_avg_conv from .util import moving_avg_fft",
"import extrapolate from .util import nearest from .util import deprecated",
".util import next_pow2 from .util import top_and_tail from .util import",
"import moving_avg_conv from .util import moving_avg_fft from .util import normalize"
] |
[
"otherNP = building.find('**/door') if otherNP.isEmpty(): otherNP = building.find('**/door_origin') else: self.notify.error('No",
"self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0 def getBuilding(self, allowEmpty = False):",
"'door_trigger_' + str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass",
"'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None, 'avId': -1, 'allowRedirect':",
"disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block): self.houseId = block",
"door = render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else:",
"building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0 def",
"== DoorTypes.EXT_STANDARD: if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty:",
"DistributedObject from toontown.toonbase import ToontownGlobals from direct.directnotify import DirectNotifyGlobal from",
"DoorTypes.EXT_STANDARD: whereTo = 'house' else: whereTo = 'estate' request =",
"other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if",
"request = {'loader': 'safeZoneLoader', 'where': whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate,",
"ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None, 'avId': -1, 'allowRedirect': 0, 'doorDoId':",
"__init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self,",
"self.building def isInterior(self): if self.doorType == DoorTypes.INT_STANDARD: return 1 return",
"== DoorTypes.EXT_STANDARD: whereTo = 'house' else: whereTo = 'estate' request",
"hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if",
"= self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return self.building return self.building def",
"whereTo = 'estate' request = {'loader': 'safeZoneLoader', 'where': whereTo, 'how':",
"setZoneIdAndBlock(self, zoneId, block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def",
"0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName)",
"if self.doorType == DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif self.doorType ==",
"door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate()",
"rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find rightDoor') return otherNP = self.getDoorNodePath()",
"self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building = self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName())",
"import * from direct.distributed import DistributedObject from toontown.toonbase import ToontownGlobals",
"if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self):",
"import * from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import *",
"Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId)",
"True).isEmpty() self.hideDoorParts() building = self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(),",
"from direct.showbase.MessengerGlobal import messenger from direct.fsm import ClassicFSM from toontown.building",
"DoorTypes.INT_STANDARD: return 1 return 0 def getDoorNodePath(self): if self.doorType ==",
"find doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did",
"isInterior(self): if self.doorType == DoorTypes.INT_STANDARD: return 1 return 0 def",
"* from direct.distributed import DistributedObject from toontown.toonbase import ToontownGlobals from",
"def getBuilding(self, allowEmpty = False): if 'building' not in self.__dict__:",
"door = render.find('**/leftDoor;+s') self.building = door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD:",
"panda3d.core import * from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import",
"rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find rightDoor')",
"otherNP def enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing():",
"def isInterior(self): if self.doorType == DoorTypes.INT_STANDARD: return 1 return 0",
"enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not",
"= self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find rightDoor') return",
"* from panda3d.core import * from direct.interval.IntervalGlobal import * from",
"% self.doId if self.rightSwing: h = 100 else: h =",
"if self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building = door.getParent()",
"'building' not in self.__dict__: if self.doorType == DoorTypes.INT_STANDARD: door =",
"if 'building' not in self.__dict__: if self.doorType == DoorTypes.INT_STANDARD: door",
"def getTriggerName(self): return 'door_trigger_' + str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide()",
"if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return self.building",
"self.building = door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD: if self.houseId: self.building",
"from toontown.building import DoorTypes from toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor):",
"'shardId': None, 'avId': -1, 'allowRedirect': 0, 'doorDoId': self.otherDoId} messenger.send('doorDoneEvent', [request])",
"door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD: if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId,",
"None) if allowEmpty: return self.building return self.building def isInterior(self): if",
"<gh_stars>1-10 from toontown.toonbase.ToonBaseGlobal import * from panda3d.core import * from",
"from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import * from direct.distributed",
"False): if 'building' not in self.__dict__: if self.doorType == DoorTypes.INT_STANDARD:",
"def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block): self.houseId =",
"self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading",
"hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD:",
"* from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import * from",
"from toontown.suit import Suit from toontown.building import FADoorCodes from toontown.building",
"str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self):",
"doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor')",
"= building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0",
"direct.showbase.MessengerGlobal import messenger from direct.fsm import ClassicFSM from toontown.building import",
"import DoorTypes from toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self,",
"except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD: house",
"'house' else: whereTo = 'estate' request = {'loader': 'safeZoneLoader', 'where':",
"doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find doorFrameHoleRight')",
"use {0} as house'.format(house.__class__.__name__)) if house and house.house_loaded: self.__gotRelatedHouse() else:",
"FADoorCodes from toontown.building import DoorTypes from toontown.estate.DistributedHouse import DistributedHouse class",
"DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll()",
"otherNP = render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD: building = self.getBuilding()",
"str(self.doorType)) return otherNP def enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if",
"from toontown.toonbase import ToontownGlobals from direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal",
"did not find rightDoor') return otherNP = self.getDoorNodePath() trackName =",
"self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self): return 'door_trigger_'",
"find rightDoor') return otherNP = self.getDoorNodePath() trackName = 'doorClose-%d' %",
"if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if self.doorType ==",
"self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0),",
"try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType",
"def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId)",
"ToontownGlobals from direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger from",
"0 def getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin')",
"= {'loader': 'safeZoneLoader', 'where': whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId':",
"if house and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse)",
"0 def getBuilding(self, allowEmpty = False): if 'building' not in",
"blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self,",
"announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if",
"ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find",
"rightDoor') return otherNP = self.getDoorNodePath() trackName = 'doorClose-%d' % self.doId",
"= building.find('**/door_origin') else: self.notify.error('No such door type as ' +",
"self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD: door =",
"self.zoneDoneLoading = 0 def getBuilding(self, allowEmpty = False): if 'building'",
"whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None, 'avId':",
"'estate' request = {'loader': 'safeZoneLoader', 'where': whereTo, 'how': 'doorIn', 'hoodId':",
"return rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find",
"= render.find('**/leftDoor;+s') self.building = door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD: if",
"SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId =",
"self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0 def getBuilding(self, allowEmpty",
"building.find('**/door_origin') else: self.notify.error('No such door type as ' + str(self.doorType))",
"toontown.building import FADoorCodes from toontown.building import DoorTypes from toontown.estate.DistributedHouse import",
"isinstance(house, DistributedHouse): self.notify.error('tried to use {0} as house'.format(house.__class__.__name__)) if house",
"self.doorType == DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD:",
"direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger from direct.fsm import",
"= building.find('**/door') if otherNP.isEmpty(): otherNP = building.find('**/door_origin') else: self.notify.error('No such",
"base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD: whereTo =",
"from direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger from direct.fsm",
"house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif self.doorType ==",
"direct.distributed import DistributedObject from toontown.toonbase import ToontownGlobals from direct.directnotify import",
"zoneId = self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD: whereTo = 'house'",
"else: self.notify.error('No such door type as ' + str(self.doorType)) return",
"import * from direct.distributed.ClockDelta import * from direct.distributed import DistributedObject",
"self.doorType == DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP = building.find('**/door') if",
"= self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger)",
"base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse): self.notify.error('tried to use {0} as",
"= self.getDoorNodePath() trackName = 'doorClose-%d' % self.doId if self.rightSwing: h",
"self.doorType == DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse):",
"return otherNP def enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty():",
"* from direct.distributed.ClockDelta import * from direct.distributed import DistributedObject from",
"def enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did",
"0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts)",
"self.hideDoorParts() building = self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger)",
"+ str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def",
"not find doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing():",
"DoorTypes from toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr):",
"def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building",
"= 'estate' request = {'loader': 'safeZoneLoader', 'where': whereTo, 'how': 'doorIn',",
"self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building = self.getBuilding()",
"elif self.doorType == DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP = building.find('**/door')",
"direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta import * from direct.distributed import",
"from direct.fsm import ClassicFSM from toontown.building import DistributedDoor from toontown.hood",
"self.houseId, self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if",
"= 'doorClose-%d' % self.doId if self.rightSwing: h = 100 else:",
"house = base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse): self.notify.error('tried to use",
"{'loader': 'safeZoneLoader', 'where': whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId,",
"'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD: whereTo",
"building = self.getBuilding() otherNP = building.find('**/door') if otherNP.isEmpty(): otherNP =",
"doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading =",
"self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building = door.getParent() elif",
"getTriggerName(self): return 'door_trigger_' + str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide()",
"return self.building def isInterior(self): if self.doorType == DoorTypes.INT_STANDARD: return 1",
"self.getDoorNodePath() trackName = 'doorClose-%d' % self.doId if self.rightSwing: h =",
"self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find doorFrameHoleRight') return rightDoor",
"__gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building =",
"-100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h,",
"did not find doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty():",
"'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None, 'avId': -1,",
"self.doorType == DoorTypes.INT_STANDARD: return 1 return 0 def getDoorNodePath(self): if",
"100 else: h = -100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0,",
"self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType ==",
"door type as ' + str(self.doorType)) return otherNP def enterClosing(self,",
"cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block): self.houseId",
"h = -100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0,",
"getBuilding(self, allowEmpty = False): if 'building' not in self.__dict__: if",
"block) def getTriggerName(self): return 'door_trigger_' + str(self.houseId) def hideDoorParts(self): try:",
"as house'.format(house.__class__.__name__)) if house and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' %",
"direct.distributed.ClockDelta import * from direct.distributed import DistributedObject from toontown.toonbase import",
"duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide),",
"type as ' + str(self.doorType)) return otherNP def enterClosing(self, ts):",
"otherNP = self.getDoorNodePath() trackName = 'doorClose-%d' % self.doId if self.rightSwing:",
"as ' + str(self.doorType)) return otherNP def enterClosing(self, ts): doorFrameHoleRight",
"% self.houseId, self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s')",
"if allowEmpty: return self.building return self.building def isInterior(self): if self.doorType",
"return otherNP = self.getDoorNodePath() trackName = 'doorClose-%d' % self.doId if",
"' + str(self.doorType)) return otherNP def enterClosing(self, ts): doorFrameHoleRight =",
"== DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building = door.getParent() elif self.doorType",
"0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx,",
"self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId,",
"import messenger from direct.fsm import ClassicFSM from toontown.building import DistributedDoor",
"and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif self.doorType",
"class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self)",
"zoneId, block) def getTriggerName(self): return 'door_trigger_' + str(self.houseId) def hideDoorParts(self):",
"self.__dict__: if self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building =",
"= not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building = self.getBuilding() doorTrigger =",
"== DoorTypes.INT_STANDARD: return 1 return 0 def getDoorNodePath(self): if self.doorType",
"hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat,",
"elif self.doorType == DoorTypes.EXT_STANDARD: if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None)",
"self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not",
"toontown.toonbase.ToonBaseGlobal import * from panda3d.core import * from direct.interval.IntervalGlobal import",
"messenger from direct.fsm import ClassicFSM from toontown.building import DistributedDoor from",
"return self.building return self.building def isInterior(self): if self.doorType == DoorTypes.INT_STANDARD:",
"else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty()",
"rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId",
"'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None, 'avId': -1, 'allowRedirect': 0,",
"= -100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0),",
"self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat",
"ClassicFSM from toontown.building import DistributedDoor from toontown.hood import ZoneUtil from",
"self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if self.doorType",
"'safeZoneLoader', 'where': whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId':",
"= render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP",
"DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self,",
"allowEmpty = False): if 'building' not in self.__dict__: if self.doorType",
"self.doId if self.rightSwing: h = 100 else: h = -100",
"= 0 def getBuilding(self, allowEmpty = False): if 'building' not",
"import * from panda3d.core import * from direct.interval.IntervalGlobal import *",
"% self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat =",
"== DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP = building.find('**/door') if otherNP.isEmpty():",
"DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse)",
"self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return self.building return",
"zoneId, block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self):",
"Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'):",
"ZoneUtil from toontown.suit import Suit from toontown.building import FADoorCodes from",
"= self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD: whereTo = 'house' else:",
"direct.fsm import ClassicFSM from toontown.building import DistributedDoor from toontown.hood import",
"1 return 0 def getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD: otherNP",
"self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building = self.getBuilding() doorTrigger",
"building.find('**/door') if otherNP.isEmpty(): otherNP = building.find('**/door_origin') else: self.notify.error('No such door",
"DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP = building.find('**/door') if otherNP.isEmpty(): otherNP",
"self.rightSwing: h = 100 else: h = -100 self.finishDoorTrack() self.doorTrack",
"DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self): return 'door_trigger_' + str(self.houseId) def",
"Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'),",
"node=rightDoor), name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId",
"import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def",
"DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId, block):",
"toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr)",
"if self.doorType == DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if not isinstance(house,",
"self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0,",
"import Suit from toontown.building import FADoorCodes from toontown.building import DoorTypes",
"toontown.hood import ZoneUtil from toontown.suit import Suit from toontown.building import",
"from direct.distributed.ClockDelta import * from direct.distributed import DistributedObject from toontown.toonbase",
"DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building = door.getParent() elif self.doorType ==",
"self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if door.isEmpty():",
"return 0 def getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD: otherNP =",
"whereTo = 'house' else: whereTo = 'estate' request = {'loader':",
"== DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD: building",
"name=trackName) self.doorTrack.start(ts) if hasattr(self, 'done'): base.cr.playGame.hood.loader.setHouse(self.houseId) zoneId = self.otherZoneId if",
"== DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse): self.notify.error('tried",
"None, 'avId': -1, 'allowRedirect': 0, 'doorDoId': self.otherDoId} messenger.send('doorDoneEvent', [request]) return",
"import ToontownGlobals from direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger",
"if self.doorType == DoorTypes.INT_STANDARD: return 1 return 0 def getDoorNodePath(self):",
"self.notify.error('No such door type as ' + str(self.doorType)) return otherNP",
"+ str(self.doorType)) return otherNP def enterClosing(self, ts): doorFrameHoleRight = self.findDoorNode('doorFrameHoleRight')",
"Suit from toontown.building import FADoorCodes from toontown.building import DoorTypes from",
"not find rightDoor') return otherNP = self.getDoorNodePath() trackName = 'doorClose-%d'",
"house and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif",
"render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD: building = self.getBuilding() otherNP =",
"DistributedDoor from toontown.hood import ZoneUtil from toontown.suit import Suit from",
"{0} as house'.format(house.__class__.__name__)) if house and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d'",
"from toontown.building import FADoorCodes from toontown.building import DoorTypes from toontown.estate.DistributedHouse",
"from toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self,",
"if not isinstance(house, DistributedHouse): self.notify.error('tried to use {0} as house'.format(house.__class__.__name__))",
"block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self): return",
"toontown.building import DistributedDoor from toontown.hood import ZoneUtil from toontown.suit import",
"to use {0} as house'.format(house.__class__.__name__)) if house and house.house_loaded: self.__gotRelatedHouse()",
"self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return self.building return self.building def isInterior(self):",
"import ZoneUtil from toontown.suit import Suit from toontown.building import FADoorCodes",
"= base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse): self.notify.error('tried to use {0}",
"if self.doorType == DoorTypes.EXT_STANDARD: whereTo = 'house' else: whereTo =",
"import FADoorCodes from toontown.building import DoorTypes from toontown.estate.DistributedHouse import DistributedHouse",
"doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not",
"self.notify.error('tried to use {0} as house'.format(house.__class__.__name__)) if house and house.house_loaded:",
"'doorClose-%d' % self.doId if self.rightSwing: h = 100 else: h",
"import DistributedDoor from toontown.hood import ZoneUtil from toontown.suit import Suit",
"def getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif",
"return 1 return 0 def getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD:",
"not isinstance(house, DistributedHouse): self.notify.error('tried to use {0} as house'.format(house.__class__.__name__)) if",
"not in self.__dict__: if self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s')",
"else: whereTo = 'estate' request = {'loader': 'safeZoneLoader', 'where': whereTo,",
"self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts()",
"= False): if 'building' not in self.__dict__: if self.doorType ==",
"self.doorType == DoorTypes.EXT_STANDARD: if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if",
"self.otherZoneId if self.doorType == DoorTypes.EXT_STANDARD: whereTo = 'house' else: whereTo",
"self.__gotRelatedHouse) else: self.__gotRelatedHouse() def __gotRelatedHouse(self): self.doPostAnnounceGenerate() self.bHasFlat = not self.findDoorNode('door*flat',",
"DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if not",
"DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif self.doorType == DoorTypes.EXT_STANDARD: building =",
"block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self): return 'door_trigger_' + str(self.houseId)",
"self.doorTrigger) self.zoneDoneLoading = 0 def getBuilding(self, allowEmpty = False): if",
"else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD: door",
"from panda3d.core import * from direct.interval.IntervalGlobal import * from direct.distributed.ClockDelta",
"self.getBuilding() otherNP = building.find('**/door') if otherNP.isEmpty(): otherNP = building.find('**/door_origin') else:",
"DoorTypes.EXT_STANDARD: house = base.cr.doId2do.get(self.houseId) if not isinstance(house, DistributedHouse): self.notify.error('tried to",
"getDoorNodePath(self): if self.doorType == DoorTypes.INT_STANDARD: otherNP = render.find('**/door_origin') elif self.doorType",
"from toontown.hood import ZoneUtil from toontown.suit import Suit from toontown.building",
"DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self):",
"self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' %",
"self.findDoorNode('rightDoor') if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find rightDoor') return otherNP",
"from toontown.building import DistributedDoor from toontown.hood import ZoneUtil from toontown.suit",
"h = 100 else: h = -100 self.finishDoorTrack() self.doorTrack =",
"return 'door_trigger_' + str(self.houseId) def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except:",
"toontown.toonbase import ToontownGlobals from direct.directnotify import DirectNotifyGlobal from direct.showbase.MessengerGlobal import",
"such door type as ' + str(self.doorType)) return otherNP def",
"DoorTypes.EXT_STANDARD: if self.houseId: self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return",
"import DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger from direct.fsm import ClassicFSM",
"in self.__dict__: if self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') self.building",
"'where': whereTo, 'how': 'doorIn', 'hoodId': ToontownGlobals.MyEstate, 'zoneId': zoneId, 'shardId': None,",
"= self.getBuilding() otherNP = building.find('**/door') if otherNP.isEmpty(): otherNP = building.find('**/door_origin')",
"from toontown.toonbase.ToonBaseGlobal import * from panda3d.core import * from direct.interval.IntervalGlobal",
"render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse() def",
"self.notify.warning('enterClosing(): did not find doorFrameHoleRight') return rightDoor = self.findDoorNode('rightDoor') if",
"def hideDoorParts(self): try: self.findDoorNode('doorFrameHoleRight').hide() self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self)",
"= 100 else: h = -100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor,",
"0, 0), startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor),",
"from direct.distributed import DistributedObject from toontown.toonbase import ToontownGlobals from direct.directnotify",
"= self.findDoorNode('doorFrameHoleRight') if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find doorFrameHoleRight') return",
"else: h = -100 self.finishDoorTrack() self.doorTrack = Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0,",
"DirectNotifyGlobal from direct.showbase.MessengerGlobal import messenger from direct.fsm import ClassicFSM from",
"house'.format(house.__class__.__name__)) if house and house.house_loaded: self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId,",
"zoneId, 'shardId': None, 'avId': -1, 'allowRedirect': 0, 'doorDoId': self.otherDoId} messenger.send('doorDoneEvent',",
"cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def setZoneIdAndBlock(self, zoneId,",
"'zoneId': zoneId, 'shardId': None, 'avId': -1, 'allowRedirect': 0, 'doorDoId': self.otherDoId}",
"if rightDoor.isEmpty(): self.notify.warning('enterClosing(): did not find rightDoor') return otherNP =",
"if self.rightSwing: h = 100 else: h = -100 self.finishDoorTrack()",
"self.building return self.building def isInterior(self): if self.doorType == DoorTypes.INT_STANDARD: return",
"pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD: house =",
"self.building = self.cr.playGame.hood.loader.houseId2house.get(self.houseId, None) if allowEmpty: return self.building return self.building",
"= block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block) def getTriggerName(self): return 'door_trigger_' +",
"= 'house' else: whereTo = 'estate' request = {'loader': 'safeZoneLoader',",
"toontown.building import DoorTypes from toontown.estate.DistributedHouse import DistributedHouse class DistributedHouseDoor(DistributedDoor.DistributedDoor): def",
"allowEmpty: return self.building return self.building def isInterior(self): if self.doorType ==",
"elif self.doorType == DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d'",
"import DistributedObject from toontown.toonbase import ToontownGlobals from direct.directnotify import DirectNotifyGlobal",
"doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0 def getBuilding(self,",
"self.notify.warning('enterClosing(): did not find rightDoor') return otherNP = self.getDoorNodePath() trackName",
"if doorFrameHoleRight.isEmpty(): self.notify.warning('enterClosing(): did not find doorFrameHoleRight') return rightDoor =",
"render.find('**/leftDoor;+s') self.building = door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD: if self.houseId:",
"otherNP = building.find('**/door_origin') else: self.notify.error('No such door type as '",
"toontown.suit import Suit from toontown.building import FADoorCodes from toontown.building import",
"not self.findDoorNode('door*flat', True).isEmpty() self.hideDoorParts() building = self.getBuilding() doorTrigger = building.find('**/door_trigger*')",
"self.doorTrigger) self.acceptOnce('clearOutToonInterior', self.doorTrigger) self.zoneDoneLoading = 0 def getBuilding(self, allowEmpty =",
"if otherNP.isEmpty(): otherNP = building.find('**/door_origin') else: self.notify.error('No such door type",
"def setZoneIdAndBlock(self, zoneId, block): self.houseId = block DistributedDoor.DistributedDoor.setZoneIdAndBlock(self, zoneId, block)",
"building = self.getBuilding() doorTrigger = building.find('**/door_trigger*') doorTrigger.setName(self.getTriggerName()) self.accept(self.getEnterTriggerEvent(), self.doorTrigger) self.acceptOnce('clearOutToonInterior',",
"self.doorType == DoorTypes.EXT_STANDARD: whereTo = 'house' else: whereTo = 'estate'",
"== DoorTypes.INT_STANDARD: door = render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId,",
"self.__gotRelatedHouse() else: self.acceptOnce('houseLoaded-%d' % self.houseId, self.__gotRelatedHouse) elif self.doorType == DoorTypes.INT_STANDARD:",
"self.findDoorNode('doorFrameHoleLeft').hide() except: pass def announceGenerate(self): DistributedObject.DistributedObject.announceGenerate(self) if self.doorType == DoorTypes.EXT_STANDARD:",
"trackName = 'doorClose-%d' % self.doId if self.rightSwing: h = 100",
"otherNP.isEmpty(): otherNP = building.find('**/door_origin') else: self.notify.error('No such door type as",
"import ClassicFSM from toontown.building import DistributedDoor from toontown.hood import ZoneUtil",
"startHpr=VBase3(h, 0, 0), other=otherNP, blendType='easeInOut'), Func(doorFrameHoleRight.hide), Func(self.hideIfHasFlat, rightDoor), SoundInterval(self.closeSfx, node=rightDoor),",
"DistributedHouse): self.notify.error('tried to use {0} as house'.format(house.__class__.__name__)) if house and",
"def __init__(self, cr): DistributedDoor.DistributedDoor.__init__(self, cr) def disable(self): DistributedDoor.DistributedDoor.disable(self) self.ignoreAll() def",
"= render.find('**/leftDoor;+s') if door.isEmpty(): self.acceptOnce('houseInteriorLoaded-%d' % self.zoneId, self.__gotRelatedHouse) else: self.__gotRelatedHouse()",
"= Sequence(LerpHprInterval(nodePath=rightDoor, duration=1.0, hpr=VBase3(0, 0, 0), startHpr=VBase3(h, 0, 0), other=otherNP,",
"= door.getParent() elif self.doorType == DoorTypes.EXT_STANDARD: if self.houseId: self.building ="
] |
[
"f.read() sm = pystan.StanModel(model_code=model_wiener)# Compile the model stan ncohers =",
"'n200sub': np.random.uniform(.11, .2, size=nconds), 'lambda': np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11, .2,",
"Compile the model stan ncohers = 2 #Number of coherence",
"model and generate samples fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup,",
"missing data for n200lat N_mis = mis.shape[0] # number of",
"index for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each trial",
"Model 2 import pystan import pandas as pd import numpy",
"{'N_obs':N_obs, #Number of trial-level observations 'N_mis':N_mis, #Number of trial-level mising",
"sampling for c in range(0, nchains): chaininit = { 'delta':",
"3, size=ncohers), 'alpha': np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01, .2), 'res': np.random.uniform(.01,",
"span data_winner = {'N_obs':N_obs, #Number of trial-level observations 'N_mis':N_mis, #Number",
"'res': np.random.uniform(.01, .02, size=nspats), 'n200sub': np.random.uniform(.11, .2, size=nconds), 'lambda': np.random.uniform(.01,",
"generate samples fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)",
"import utils parts = 1 data = utils.get_data() #loading dateset",
"data_winner = {'N_obs':N_obs, #Number of trial-level observations 'N_mis':N_mis, #Number of",
"as np import sys sys.path.append('../../') import utils parts = 1",
"obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data for n200lat",
"conditions 'nspats':nspats, #Number of spatial conditions 'nconds':nconds, #Number of conditions",
"# missing data for n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation",
"np.random.uniform(1, 3, size=ncohers), 'alpha': np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01, .2), 'res':",
"data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat obs",
"latency for each trial observation # setting MCMC arguments niter",
"pd import numpy as np import sys sys.path.append('../../') import utils",
"= 10000 nwarmup = 4000 nchains = 1 thin =",
".02, size=nspats), 'n200sub': np.random.uniform(.11, .2, size=nconds), 'lambda': np.random.uniform(.01, .02), 'n200lat_mis':",
"#reading the model span f = open(modelfile, 'r') model_wiener =",
"the model span f = open(modelfile, 'r') model_wiener = f.read()",
"conditions nspats = 2 #Number of spatial conditions nconds =",
"#Number of coherence conditions nspats = 2 #Number of spatial",
"np import sys sys.path.append('../../') import utils parts = 1 data",
"data['n200lat'].to_numpy() #set inistial data for molde span data_winner = {'N_obs':N_obs,",
"'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for",
"Train the model and generate samples fit = sm.sampling(data=data_winner, iter=niter,",
"data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy() #set inistial data for molde span",
"#Number of coherence conditions 'nspats':nspats, #Number of spatial conditions 'nconds':nconds,",
"of spatial conditions 'nconds':nconds, #Number of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in",
"data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy() conds = data['conds'].to_numpy()",
"in range(0, nchains): chaininit = { 'delta': np.random.uniform(1, 3, size=ncohers),",
"size = N_mis) } initials.append(chaininit) # Train the model and",
"'ncohers':ncohers, #Number of coherence conditions 'nspats':nspats, #Number of spatial conditions",
"4000 nchains = 1 thin = 1 initials = []",
"#Number of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation and",
"number of missing data N_obs = obs.shape[0] # number of",
"import numpy as np import sys sys.path.append('../../') import utils parts",
"# Model 2 import pystan import pandas as pd import",
"for n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data",
"of coherence conditions 'nspats':nspats, #Number of spatial conditions 'nconds':nconds, #Number",
"data = data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for",
"pystan import pandas as pd import numpy as np import",
"span f = open(modelfile, 'r') model_wiener = f.read() sm =",
"mising data 'ncohers':ncohers, #Number of coherence conditions 'nspats':nspats, #Number of",
"data for molde span data_winner = {'N_obs':N_obs, #Number of trial-level",
"#Number of spatial conditions 'nconds':nconds, #Number of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt",
"utils.get_data() #loading dateset data = data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] #",
"# setting MCMC arguments niter = 10000 nwarmup = 4000",
"= f.read() sm = pystan.StanModel(model_code=model_wiener)# Compile the model stan ncohers",
"1 data = utils.get_data() #loading dateset data = data[data['participant']==parts] mis",
"n200lat N_mis = mis.shape[0] # number of missing data N_obs",
"for c in range(0, nchains): chaininit = { 'delta': np.random.uniform(1,",
"np.random.uniform(.01, .2), 'res': np.random.uniform(.01, .02, size=nspats), 'n200sub': np.random.uniform(.11, .2, size=nconds),",
".2, size = N_mis) } initials.append(chaininit) # Train the model",
"missing data for n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and",
"data 'ncohers':ncohers, #Number of coherence conditions 'nspats':nspats, #Number of spatial",
"np.random.uniform(.01, .02, size=nspats), 'n200sub': np.random.uniform(.11, .2, size=nconds), 'lambda': np.random.uniform(.01, .02),",
"1 thin = 1 initials = [] # initial sampling",
"of conditions y = data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy() cond_spat =",
"each trial observation # setting MCMC arguments niter = 10000",
"} initials.append(chaininit) # Train the model and generate samples fit",
"= 2 #Number of spatial conditions nconds = 4 #Number",
"trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index",
"trial-level mising data 'ncohers':ncohers, #Number of coherence conditions 'nspats':nspats, #Number",
"n200lat = data['n200lat'].to_numpy() #set inistial data for molde span data_winner",
"of coherence conditions nspats = 2 #Number of spatial conditions",
"obervation and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial",
"data N_obs = obs.shape[0] # number of observed data modelfile",
"= obs.shape[0] # number of observed data modelfile = '../../stans/res_nonhier.stan'",
"#Number of conditions y = data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy() cond_spat",
"for obervation and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each",
"1.), 'eta': np.random.uniform(.01, .2), 'res': np.random.uniform(.01, .02, size=nspats), 'n200sub': np.random.uniform(.11,",
"np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] #",
"nchains = 1 thin = 1 initials = [] #",
"of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation and missing",
"# number of missing data N_obs = obs.shape[0] # number",
"= '../../stans/res_nonhier.stan' #reading the model span f = open(modelfile, 'r')",
"the model and generate samples fit = sm.sampling(data=data_winner, iter=niter, chains=nchains,",
"spatial conditions nconds = 4 #Number of conditions y =",
"data for n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing",
"modelfile = '../../stans/res_nonhier.stan' #reading the model span f = open(modelfile,",
"4 #Number of conditions y = data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy()",
"initials.append(chaininit) # Train the model and generate samples fit =",
"# observation and missing data for n200lat N_mis = mis.shape[0]",
"= 1 initials = [] # initial sampling for c",
"model stan ncohers = 2 #Number of coherence conditions nspats",
"nspats = 2 #Number of spatial conditions nconds = 4",
"molde span data_winner = {'N_obs':N_obs, #Number of trial-level observations 'N_mis':N_mis,",
"= data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat",
"= data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy() conds = data['conds'].to_numpy() n200lat =",
"np.random.uniform(.11, .2, size=nconds), 'lambda': np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11, .2, size",
"= open(modelfile, 'r') model_wiener = f.read() sm = pystan.StanModel(model_code=model_wiener)# Compile",
"of trial-level observations 'N_mis':N_mis, #Number of trial-level mising data 'ncohers':ncohers,",
"of observed data modelfile = '../../stans/res_nonhier.stan' #reading the model span",
"in seconds for obervation and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index",
"np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01, .2), 'res': np.random.uniform(.01, .02, size=nspats), 'n200sub':",
"'../../stans/res_nonhier.stan' #reading the model span f = open(modelfile, 'r') model_wiener",
"of trial-level mising data 'ncohers':ncohers, #Number of coherence conditions 'nspats':nspats,",
"np.random.uniform(.11, .2, size = N_mis) } initials.append(chaininit) # Train the",
"observation # setting MCMC arguments niter = 10000 nwarmup =",
"conds = data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy() #set inistial data for",
"= [] # initial sampling for c in range(0, nchains):",
"# Train the model and generate samples fit = sm.sampling(data=data_winner,",
"#n200 latency for each trial observation # setting MCMC arguments",
"import pystan import pandas as pd import numpy as np",
"= np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0]",
"data modelfile = '../../stans/res_nonhier.stan' #reading the model span f =",
"observed data modelfile = '../../stans/res_nonhier.stan' #reading the model span f",
"for each trial observation # setting MCMC arguments niter =",
"size=ncohers), 'alpha': np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01, .2), 'res': np.random.uniform(.01, .02,",
"= mis.shape[0] # number of missing data N_obs = obs.shape[0]",
"'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each trial 'n200lat_obs':n200lat[obs]}; #n200 latency for",
"= data['n200lat'].to_numpy() #set inistial data for molde span data_winner =",
"and generate samples fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin,",
"mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat obs =",
"= 4 #Number of conditions y = data['y'].to_numpy() cond_coher =",
"y = data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy() conds",
"2 #Number of spatial conditions nconds = 4 #Number of",
"'N_mis':N_mis, #Number of trial-level mising data 'ncohers':ncohers, #Number of coherence",
"arguments niter = 10000 nwarmup = 4000 nchains = 1",
"# initial sampling for c in range(0, nchains): chaininit =",
"nchains): chaininit = { 'delta': np.random.uniform(1, 3, size=ncohers), 'alpha': np.random.uniform(.5,",
"size=nconds), 'lambda': np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)",
"data = utils.get_data() #loading dateset data = data[data['participant']==parts] mis =",
"spatial conditions 'nconds':nconds, #Number of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds",
"'lambda': np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11, .2, size = N_mis) }",
"#acc*rt in seconds for obervation and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence",
"obs.shape[0] # number of observed data modelfile = '../../stans/res_nonhier.stan' #reading",
"'nconds':nconds, #Number of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation",
"seconds for obervation and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for",
"conditions y = data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy()",
"trial 'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation # setting",
"= data['cond_spat'].to_numpy() conds = data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy() #set inistial",
"nconds = 4 #Number of conditions y = data['y'].to_numpy() cond_coher",
"#Number of trial-level mising data 'ncohers':ncohers, #Number of coherence conditions",
"as pd import numpy as np import sys sys.path.append('../../') import",
"trial-level observations 'N_mis':N_mis, #Number of trial-level mising data 'ncohers':ncohers, #Number",
"= pystan.StanModel(model_code=model_wiener)# Compile the model stan ncohers = 2 #Number",
"#set inistial data for molde span data_winner = {'N_obs':N_obs, #Number",
"dateset data = data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data",
"conditions nconds = 4 #Number of conditions y = data['y'].to_numpy()",
"conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation and missing data",
"sys.path.append('../../') import utils parts = 1 data = utils.get_data() #loading",
"thin = 1 initials = [] # initial sampling for",
"{ 'delta': np.random.uniform(1, 3, size=ncohers), 'alpha': np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01,",
"the model stan ncohers = 2 #Number of coherence conditions",
"c in range(0, nchains): chaininit = { 'delta': np.random.uniform(1, 3,",
"each trial 'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation #",
"setting MCMC arguments niter = 10000 nwarmup = 4000 nchains",
"= {'N_obs':N_obs, #Number of trial-level observations 'N_mis':N_mis, #Number of trial-level",
"2 import pystan import pandas as pd import numpy as",
"of missing data N_obs = obs.shape[0] # number of observed",
"'delta': np.random.uniform(1, 3, size=ncohers), 'alpha': np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01, .2),",
"fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials) utils.to_pickle(stan_model=sm, stan_fit=fit,",
"#Coherence index for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each",
"= sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials) utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')",
"for n200lat N_mis = mis.shape[0] # number of missing data",
"sm = pystan.StanModel(model_code=model_wiener)# Compile the model stan ncohers = 2",
"inistial data for molde span data_winner = {'N_obs':N_obs, #Number of",
"coherence conditions 'nspats':nspats, #Number of spatial conditions 'nconds':nconds, #Number of",
"mis.shape[0] # number of missing data N_obs = obs.shape[0] #",
"data for n200lat N_mis = mis.shape[0] # number of missing",
"data['cond_spat'].to_numpy() conds = data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy() #set inistial data",
"= 1 thin = 1 initials = [] # initial",
"'alpha': np.random.uniform(.5, 1.), 'eta': np.random.uniform(.01, .2), 'res': np.random.uniform(.01, .02, size=nspats),",
"#loading dateset data = data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing",
"'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]),",
"each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each trial 'n200lat_obs':n200lat[obs]}; #n200",
".2, size=nconds), 'lambda': np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11, .2, size =",
"'eta': np.random.uniform(.01, .2), 'res': np.random.uniform(.01, .02, size=nspats), 'n200sub': np.random.uniform(.11, .2,",
"niter = 10000 nwarmup = 4000 nchains = 1 thin",
"#sptial index for each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each",
"for each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each trial 'n200lat_obs':n200lat[obs]};",
"= np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data for n200lat N_mis",
"model span f = open(modelfile, 'r') model_wiener = f.read() sm",
"cond_spat = data['cond_spat'].to_numpy() conds = data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy() #set",
"sys sys.path.append('../../') import utils parts = 1 data = utils.get_data()",
"data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index",
"2 #Number of coherence conditions nspats = 2 #Number of",
"np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11, .2, size = N_mis) } initials.append(chaininit)",
"missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial",
"= 1 data = utils.get_data() #loading dateset data = data[data['participant']==parts]",
"'nspats':nspats, #Number of spatial conditions 'nconds':nconds, #Number of conditions 'y':np.concatenate([y[obs],y[mis]]),",
"index for each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each trial",
"index for each trial 'n200lat_obs':n200lat[obs]}; #n200 latency for each trial",
"f = open(modelfile, 'r') model_wiener = f.read() sm = pystan.StanModel(model_code=model_wiener)#",
"#Number of spatial conditions nconds = 4 #Number of conditions",
"import pandas as pd import numpy as np import sys",
"'r') model_wiener = f.read() sm = pystan.StanModel(model_code=model_wiener)# Compile the model",
"trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each trial 'n200lat_obs':n200lat[obs]}; #n200 latency",
"number of observed data modelfile = '../../stans/res_nonhier.stan' #reading the model",
"data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy() conds = data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy()",
"N_mis = mis.shape[0] # number of missing data N_obs =",
"and missing data 'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]),",
"ncohers = 2 #Number of coherence conditions nspats = 2",
"chaininit = { 'delta': np.random.uniform(1, 3, size=ncohers), 'alpha': np.random.uniform(.5, 1.),",
"and missing data for n200lat N_mis = mis.shape[0] # number",
"missing data N_obs = obs.shape[0] # number of observed data",
"pystan.StanModel(model_code=model_wiener)# Compile the model stan ncohers = 2 #Number of",
"initials = [] # initial sampling for c in range(0,",
"= N_mis) } initials.append(chaininit) # Train the model and generate",
"for each trial 'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation",
"= utils.get_data() #loading dateset data = data[data['participant']==parts] mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0]",
"range(0, nchains): chaininit = { 'delta': np.random.uniform(1, 3, size=ncohers), 'alpha':",
"cond_coher = data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy() conds = data['conds'].to_numpy() n200lat",
"'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for",
".2), 'res': np.random.uniform(.01, .02, size=nspats), 'n200sub': np.random.uniform(.11, .2, size=nconds), 'lambda':",
"# number of observed data modelfile = '../../stans/res_nonhier.stan' #reading the",
"nwarmup = 4000 nchains = 1 thin = 1 initials",
"= 2 #Number of coherence conditions nspats = 2 #Number",
"= data['y'].to_numpy() cond_coher = data['cond_coher'].to_numpy() cond_spat = data['cond_spat'].to_numpy() conds =",
"parts = 1 data = utils.get_data() #loading dateset data =",
"size=nspats), 'n200sub': np.random.uniform(.11, .2, size=nconds), 'lambda': np.random.uniform(.01, .02), 'n200lat_mis': np.random.uniform(.11,",
"observation and missing data for n200lat N_mis = mis.shape[0] #",
"model_wiener = f.read() sm = pystan.StanModel(model_code=model_wiener)# Compile the model stan",
"#sptial index for each trial 'n200lat_obs':n200lat[obs]}; #n200 latency for each",
"pandas as pd import numpy as np import sys sys.path.append('../../')",
"import sys sys.path.append('../../') import utils parts = 1 data =",
"MCMC arguments niter = 10000 nwarmup = 4000 nchains =",
"N_mis) } initials.append(chaininit) # Train the model and generate samples",
"trial observation # setting MCMC arguments niter = 10000 nwarmup",
"= { 'delta': np.random.uniform(1, 3, size=ncohers), 'alpha': np.random.uniform(.5, 1.), 'eta':",
"= data['conds'].to_numpy() n200lat = data['n200lat'].to_numpy() #set inistial data for molde",
"N_obs = obs.shape[0] # number of observed data modelfile =",
"of spatial conditions nconds = 4 #Number of conditions y",
"initial sampling for c in range(0, nchains): chaininit = {",
".02), 'n200lat_mis': np.random.uniform(.11, .2, size = N_mis) } initials.append(chaininit) #",
"each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each trial 'conds':np.concatenate([conds[obs],conds[mis]]), #sptial",
"np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data for n200lat N_mis =",
"numpy as np import sys sys.path.append('../../') import utils parts =",
"n200lat obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data for",
"observations 'N_mis':N_mis, #Number of trial-level mising data 'ncohers':ncohers, #Number of",
"coherence conditions nspats = 2 #Number of spatial conditions nconds",
"conditions 'nconds':nconds, #Number of conditions 'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for",
"utils parts = 1 data = utils.get_data() #loading dateset data",
"'n200lat_mis': np.random.uniform(.11, .2, size = N_mis) } initials.append(chaininit) # Train",
"#!/home/a.ghaderi/.conda/envs/envjm/bin/python # Model 2 import pystan import pandas as pd",
"[] # initial sampling for c in range(0, nchains): chaininit",
"= 4000 nchains = 1 thin = 1 initials =",
"for each trial 'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each trial 'conds':np.concatenate([conds[obs],conds[mis]]),",
"for molde span data_winner = {'N_obs':N_obs, #Number of trial-level observations",
"'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation # setting MCMC",
"#Number of trial-level observations 'N_mis':N_mis, #Number of trial-level mising data",
"10000 nwarmup = 4000 nchains = 1 thin = 1",
"1 initials = [] # initial sampling for c in",
"samples fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials) utils.to_pickle(stan_model=sm,",
"open(modelfile, 'r') model_wiener = f.read() sm = pystan.StanModel(model_code=model_wiener)# Compile the",
"stan ncohers = 2 #Number of coherence conditions nspats ="
] |
[
"1) * per_page jobs = [serialize_job(job) for job in queue.get_jobs(offset,",
"[q.name for q in worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state())",
"last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window,",
"jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers(): def serialize_queue_names(worker): return [q.name",
"or False \"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not",
"in pages_numbers_in_window ] last_page = int(ceil(total_items / float(per_page))) prev_page =",
"render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def",
"requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def requeue_all(): fq",
"queue_name is None: # Show the failed queue by default",
"def list_queues(): queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def",
"cur_page, per_page=5, window_size=10): all_pages = range(1, int(ceil(total_items / float(per_page))) +",
"push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f) def",
"q in queues] def serialize_date(dt): if dt is None: return",
"6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request",
"per_page = 5 total_items = queue.count pages_numbers_in_window = pagination_window(total_items, current_page,",
"if current_page < last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination",
"flask import current_app, url_for, abort from flask import render_template from",
"math import ceil dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', )",
"static_folder='static', ) @dashboard.before_request def authentication_hook(): \"\"\" Allow the parent app",
">= 1): pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) -",
"from functools import wraps import times from flask import Blueprint",
"cancel_job_view(job_id): rq_job = Job.fetch(job_id) if rq_job.status == \"failed\": rq_job.delete() else:",
"import Redis from redis import from_url from rq import push_connection,",
"= all_pages if (window_size >= 1): pages_window_start = int(max(0, min(len(all_pages)",
"own auth_handler method that must return True or False \"\"\"",
"else: queue = Queue() else: queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html',",
"requeue_all(): fq = get_failed_queue() job_ids = fq.job_ids count = len(job_ids)",
"failed else: queue = Queue() else: queue = Queue(queue_name) return",
"pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size /",
"for worker in Worker.all()] return dict(workers=workers) @dashboard.context_processor def inject_interval(): interval",
"@dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name): q = Queue(queue_name) q.compact() return",
"def jsonify(f): @wraps(f) def _wrapped(*args, **kwargs): from flask import jsonify",
"def requeue_all(): fq = get_failed_queue() job_ids = fq.job_ids count =",
"db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection()",
"= failed else: queue = Queue() else: queue = Queue(queue_name)",
"window_size=10): all_pages = range(1, int(ceil(total_items / float(per_page))) + 1) results",
"serialize_queue_names(worker): return [q.name for q in worker.queues] workers = [dict(name=worker.name,",
"import Job from functools import wraps import times from flask",
"= get_failed_queue() job_ids = fq.job_ids count = len(job_ids) for job_id",
"dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset",
"auth_handler and not auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'):",
"@dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def requeue_all(): fq = get_failed_queue() job_ids",
"* per_page jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)]",
"from math import ceil dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static',",
"page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job =",
"overview(queue_name, page): if queue_name is None: # Show the failed",
"'1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if queue_name is None: #",
"flask import render_template from rq import Queue, Worker from rq",
"job_ids = fq.job_ids count = len(job_ids) for job_id in job_ids:",
"@dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn",
"= Job.fetch(job_id) if rq_job.status == \"failed\": rq_job.delete() else: rq_job.cancel() return",
"> 1: prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page = None",
"per_page) pages_in_window = [ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for p",
"= Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues(): queues",
"exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return dict([ (k,v) for k,v in",
"q = Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def",
"queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview'))",
"setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST',",
"_wrapped def serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for q",
"queue = failed else: queue = Queue() else: queue =",
"Queue() else: queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page,",
"Queue(queue_name) per_page = 5 total_items = queue.count pages_numbers_in_window = pagination_window(total_items,",
"jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)] return dict(name=queue.name,",
"return dict(workers=workers) @dashboard.context_processor def inject_interval(): interval = current_app.config.get('RQ_POLL_INTERVAL', 2500) return",
"@dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f) def _wrapped(*args, **kwargs):",
"is None: return None return times.format(dt, 'UTC') def serialize_job(job): return",
"def pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages = range(1, int(ceil(total_items /",
"methods=['POST']) @jsonify def compact_queue(queue_name): q = Queue(queue_name) q.compact() return dict(status='OK')",
"result = all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name': None, 'page': '1'})",
"for p in pages_numbers_in_window ] last_page = int(ceil(total_items / float(per_page)))",
"serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info,",
"= current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not auth_handler(): abort(401) @dashboard.before_app_first_request def",
"origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return dict([ (k,v) for",
"Job.fetch(job_id) if rq_job.status == \"failed\": rq_job.delete() else: rq_job.cancel() return dict(status='OK')",
"list_queues(): queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name,",
"q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name): q =",
"Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def",
"None return times.format(dt, 'UTC') def serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at),",
"def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def",
"@jsonify def cancel_job_view(job_id): rq_job = Job.fetch(job_id) if rq_job.status == \"failed\":",
"workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker in Worker.all()] return",
"all_pages if (window_size >= 1): pages_window_start = int(max(0, min(len(all_pages) -",
"if it contains any jobs failed = Queue('failed') if not",
"dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name): q = Queue(queue_name) q.compact()",
"'UTC') def serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin,",
"@wraps(f) def _wrapped(*args, **kwargs): from flask import jsonify as flask_jsonify",
"url_for, abort from flask import render_template from rq import Queue,",
"import format_exc result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict) return _wrapped def",
"requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name): q",
"queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset =",
"methods=['POST']) @jsonify def empty_queue(queue_name): q = Queue(queue_name) q.empty() return dict(status='OK')",
"parent app to authenticate user's access to the dashboard with",
"pagination_window(total_items, current_page, per_page) pages_in_window = [ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p))",
"description=job.description) def remove_none_values(input_dict): return dict([ (k,v) for k,v in input_dict.items()",
"if queue_name is None: # Show the failed queue by",
"q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues(): queues = serialize_queues(sorted(Queue.all()))",
"def serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for q in",
"return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify",
"False \"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not auth_handler():",
"authentication_hook(): \"\"\" Allow the parent app to authenticate user's access",
"with it's own auth_handler method that must return True or",
"the parent app to authenticate user's access to the dashboard",
"int(ceil(total_items / float(per_page))) + 1) results = all_pages if (window_size",
"page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset = (current_page",
"= format_exc() return flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues): return [dict(name=q.name,",
"rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id) return",
"@jsonify def empty_queue(queue_name): q = Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact',",
"def _wrapped(*args, **kwargs): from flask import jsonify as flask_jsonify try:",
"return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name): q =",
"password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def",
"result_dict['reason'] = str(e) from traceback import format_exc result_dict['exc_info'] = format_exc()",
"def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f)",
"from traceback import format_exc result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict) return",
"authenticate user's access to the dashboard with it's own auth_handler",
"current_app, url_for, abort from flask import render_template from rq import",
"# Show the failed queue by default if it contains",
"import jsonify as flask_jsonify try: result_dict = f(*args, **kwargs) except",
"if not failed.is_empty(): queue = failed else: queue = Queue()",
"that must return True or False \"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler",
"result_dict = dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] = str(e) from traceback",
"(window_size >= 1): pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1)",
"dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def",
"range(1, int(ceil(total_items / float(per_page))) + 1) results = all_pages if",
"list_workers(): def serialize_queue_names(worker): return [q.name for q in worker.queues] workers",
"failed.is_empty(): queue = failed else: queue = Queue() else: queue",
"1: prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page = None if",
"job_ids: requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name):",
"ceil(window_size / 2.0)))) pages_window_end = int(pages_window_start + window_size) result =",
"push_connection, pop_connection from rq.job import Job from functools import wraps",
"len(job_ids) for job_id in job_ids: requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty',",
"dashboard with it's own auth_handler method that must return True",
"import get_failed_queue from math import ceil dashboard = Blueprint('rq_dashboard', __name__,",
"import from_url from rq import push_connection, pop_connection from rq.job import",
"to the dashboard with it's own auth_handler method that must",
"remove_none_values(input_dict): return dict([ (k,v) for k,v in input_dict.items() if v",
"rq_job.status == \"failed\": rq_job.delete() else: rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST'])",
"return True or False \"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler",
"def serialize_date(dt): if dt is None: return None return times.format(dt,",
"@dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if queue_name is",
"pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset = (current_page -",
"for q in worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for",
"from_url from rq import push_connection, pop_connection from rq.job import Job",
"dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers(): def serialize_queue_names(worker): return",
"= len(job_ids) for job_id in job_ids: requeue_job(job_id) return dict(status='OK', count=count)",
"dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', ) @dashboard.before_request def authentication_hook():",
"return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page): current_page = int(page)",
"dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all',",
"current_app.config['DEBUG']: result_dict['reason'] = str(e) from traceback import format_exc result_dict['exc_info'] =",
"dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name): q = Queue(queue_name)",
"= all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>',",
"current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0))",
"method that must return True or False \"\"\" auth_handler =",
"all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page':",
"remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset = (current_page - 1) *",
"- window_size, (cur_page-1) - ceil(window_size / 2.0)))) pages_window_end = int(pages_window_start",
"render_template from rq import Queue, Worker from rq import cancel_job,",
"defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if queue_name is None:",
"from redis import from_url from rq import push_connection, pop_connection from",
"window_size, (cur_page-1) - ceil(window_size / 2.0)))) pages_window_end = int(pages_window_start +",
"return times.format(dt, 'UTC') def serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at),",
"/ float(per_page))) prev_page = None if current_page > 1: prev_page",
"template_folder='templates', static_folder='static', ) @dashboard.before_request def authentication_hook(): \"\"\" Allow the parent",
"None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page):",
"and not auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn",
"e: result_dict = dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] = str(e) from",
"= int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0))))",
"per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers(): def",
"@dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page): current_page = int(page) queue =",
"import wraps import times from flask import Blueprint from flask",
"def pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f) def _wrapped(*args, **kwargs): from",
"if dt is None: return None return times.format(dt, 'UTC') def",
"return [q.name for q in worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker),",
"Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name): q",
"dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] = str(e) from traceback import format_exc",
"int(page) queue = Queue(queue_name) per_page = 5 total_items = queue.count",
"pages_numbers_in_window ] last_page = int(ceil(total_items / float(per_page))) prev_page = None",
"all_pages = range(1, int(ceil(total_items / float(per_page))) + 1) results =",
"Worker.all()] return dict(workers=workers) @dashboard.context_processor def inject_interval(): interval = current_app.config.get('RQ_POLL_INTERVAL', 2500)",
"import current_app, url_for, abort from flask import render_template from rq",
"it's own auth_handler method that must return True or False",
"flask_jsonify try: result_dict = f(*args, **kwargs) except Exception as e:",
"serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page): current_page =",
"requeue_job from rq import get_failed_queue from math import ceil dashboard",
"queue_name=q.name)) for q in queues] def serialize_date(dt): if dt is",
"- 1) * per_page jobs = [serialize_job(job) for job in",
"serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for q in queues]",
"+ window_size) result = all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name': None,",
"import Queue, Worker from rq import cancel_job, requeue_job from rq",
"from rq import cancel_job, requeue_job from rq import get_failed_queue from",
"= int(pages_window_start + window_size) result = all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/',",
"the failed queue by default if it contains any jobs",
"in Worker.all()] return dict(workers=workers) @dashboard.context_processor def inject_interval(): interval = current_app.config.get('RQ_POLL_INTERVAL',",
"as flask_jsonify try: result_dict = f(*args, **kwargs) except Exception as",
"current_page < last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination =",
"state=worker.get_state()) for worker in Worker.all()] return dict(workers=workers) @dashboard.context_processor def inject_interval():",
"None if current_page > 1: prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1)))",
"v is not None ]) def pagination_window(total_items, cur_page, per_page=5, window_size=10):",
"Queue, Worker from rq import cancel_job, requeue_job from rq import",
"import cancel_job, requeue_job from rq import get_failed_queue from math import",
"not None ]) def pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages =",
"@dashboard.route('/queues.json') @jsonify def list_queues(): queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json')",
"default if it contains any jobs failed = Queue('failed') if",
"pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f) def _wrapped(*args, **kwargs): from flask",
"times from flask import Blueprint from flask import current_app, url_for,",
"for job in queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json')",
"= None if current_page < last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name,",
"prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page = None if current_page",
"def overview(queue_name, page): if queue_name is None: # Show the",
"Allow the parent app to authenticate user's access to the",
"min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0)))) pages_window_end =",
"methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST'])",
"if v is not None ]) def pagination_window(total_items, cur_page, per_page=5,",
"from rq import push_connection, pop_connection from rq.job import Job from",
"f(*args, **kwargs) except Exception as e: result_dict = dict(status='error') if",
"input_dict.items() if v is not None ]) def pagination_window(total_items, cur_page,",
"rq import cancel_job, requeue_job from rq import get_failed_queue from math",
"dict(workers=workers) @dashboard.context_processor def inject_interval(): interval = current_app.config.get('RQ_POLL_INTERVAL', 2500) return dict(poll_interval=interval)",
"int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size / 2.0)))) pages_window_end",
"(k,v) for k,v in input_dict.items() if v is not None",
"import Blueprint from flask import current_app, url_for, abort from flask",
"current_page, per_page) pages_in_window = [ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for",
"= [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker in Worker.all()] return dict(workers=workers)",
"from flask import Blueprint from flask import current_app, url_for, abort",
"Blueprint from flask import current_app, url_for, abort from flask import",
"return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def requeue_all(): fq =",
"window_size) result = all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name': None, 'page':",
"'POST']) @jsonify def requeue_all(): fq = get_failed_queue() job_ids = fq.job_ids",
"results = all_pages if (window_size >= 1): pages_window_start = int(max(0,",
"] last_page = int(ceil(total_items / float(per_page))) prev_page = None if",
"[serialize_job(job) for job in queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination)",
"page): current_page = int(page) queue = Queue(queue_name) per_page = 5",
"k,v in input_dict.items() if v is not None ]) def",
"= Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel',",
"from flask import render_template from rq import Queue, Worker from",
"= Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', ) @dashboard.before_request def authentication_hook(): \"\"\"",
"**kwargs) except Exception as e: result_dict = dict(status='error') if current_app.config['DEBUG']:",
"def cancel_job_view(job_id): rq_job = Job.fetch(job_id) if rq_job.status == \"failed\": rq_job.delete()",
"def empty_queue(queue_name): q = Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST'])",
"jobs failed = Queue('failed') if not failed.is_empty(): queue = failed",
"rq import get_failed_queue from math import ceil dashboard = Blueprint('rq_dashboard',",
"]) def pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages = range(1, int(ceil(total_items",
"count = len(job_ids) for job_id in job_ids: requeue_job(job_id) return dict(status='OK',",
"def list_jobs(queue_name, page): current_page = int(page) queue = Queue(queue_name) per_page",
"@jsonify def compact_queue(queue_name): q = Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json')",
"Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', ) @dashboard.before_request def authentication_hook(): \"\"\" Allow",
"user's access to the dashboard with it's own auth_handler method",
"cancel_job, requeue_job from rq import get_failed_queue from math import ceil",
"current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379),",
"rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job = Job.fetch(job_id) if",
"p in pages_numbers_in_window ] last_page = int(ceil(total_items / float(per_page))) prev_page",
"@dashboard.before_request def authentication_hook(): \"\"\" Allow the parent app to authenticate",
"return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers(): def serialize_queue_names(worker):",
"return None return times.format(dt, 'UTC') def serialize_job(job): return dict( id=job.id,",
"\"\"\" Allow the parent app to authenticate user's access to",
"try: result_dict = f(*args, **kwargs) except Exception as e: result_dict",
"= str(e) from traceback import format_exc result_dict['exc_info'] = format_exc() return",
"= Queue(queue_name) per_page = 5 total_items = queue.count pages_numbers_in_window =",
"else: rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id)",
"/ float(per_page))) + 1) results = all_pages if (window_size >=",
"prev_page=prev_page)) offset = (current_page - 1) * per_page jobs =",
"in job_ids: requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def",
"[dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker in Worker.all()] return dict(workers=workers) @dashboard.context_processor",
"dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for p in pages_numbers_in_window ] last_page",
"if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'),",
"<reponame>refgenomics/rq-dashboard<gh_stars>0 from redis import Redis from redis import from_url from",
"int(pages_window_start + window_size) result = all_pages[pages_window_start:pages_window_end] return result @dashboard.route('/', defaults={'queue_name':",
"(cur_page-1) - ceil(window_size / 2.0)))) pages_window_end = int(pages_window_start + window_size)",
"abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else:",
"result_dict = f(*args, **kwargs) except Exception as e: result_dict =",
"@dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def jsonify(f):",
"wraps import times from flask import Blueprint from flask import",
"format_exc() return flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues): return [dict(name=q.name, count=q.count,",
"traceback import format_exc result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict) return _wrapped",
"= Queue() else: queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue,",
"result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return dict([ (k,v) for k,v",
"@jsonify def list_queues(): queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify",
"return dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues(): queues = serialize_queues(sorted(Queue.all())) return",
"= dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page))",
"any jobs failed = Queue('failed') if not failed.is_empty(): queue =",
"= int(ceil(total_items / float(per_page))) prev_page = None if current_page >",
"= int(page) queue = Queue(queue_name) per_page = 5 total_items =",
"None: return None return times.format(dt, 'UTC') def serialize_job(job): return dict(",
"auth_handler method that must return True or False \"\"\" auth_handler",
"not failed.is_empty(): queue = failed else: queue = Queue() else:",
"@jsonify def requeue_all(): fq = get_failed_queue() job_ids = fq.job_ids count",
"pages_in_window = [ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for p in",
"it contains any jobs failed = Queue('failed') if not failed.is_empty():",
"per_page=5, window_size=10): all_pages = range(1, int(ceil(total_items / float(per_page))) + 1)",
"from redis import Redis from redis import from_url from rq",
"= remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset = (current_page - 1)",
"rq import Queue, Worker from rq import cancel_job, requeue_job from",
"pages_numbers_in_window = pagination_window(total_items, current_page, per_page) pages_in_window = [ dict(number=p, url=url_for('.overview',",
"current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT',",
"def compact_queue(queue_name): q = Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify",
"queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job",
"True or False \"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler and",
"'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if",
"the dashboard with it's own auth_handler method that must return",
"serialize_date(dt): if dt is None: return None return times.format(dt, 'UTC')",
"by default if it contains any jobs failed = Queue('failed')",
"= None if current_page > 1: prev_page = dict(url=url_for('.overview', queue_name=queue_name,",
"in worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker in",
"2.0)))) pages_window_end = int(pages_window_start + window_size) result = all_pages[pages_window_start:pages_window_end] return",
"= fq.job_ids count = len(job_ids) for job_id in job_ids: requeue_job(job_id)",
"from rq import get_failed_queue from math import ceil dashboard =",
"compact_queue(queue_name): q = Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify def",
"in input_dict.items() if v is not None ]) def pagination_window(total_items,",
"rq import push_connection, pop_connection from rq.job import Job from functools",
"flask import jsonify as flask_jsonify try: result_dict = f(*args, **kwargs)",
"rq.job import Job from functools import wraps import times from",
"prev_page = None if current_page > 1: prev_page = dict(url=url_for('.overview',",
"return _wrapped def serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for",
"Show the failed queue by default if it contains any",
"result @dashboard.route('/', defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>')",
"< last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values(",
"in queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def",
"methods=['GET', 'POST']) @jsonify def requeue_all(): fq = get_failed_queue() job_ids =",
"workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id):",
"return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description)",
"= dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] = str(e) from traceback import",
"queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page):",
"times.format(dt, 'UTC') def serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at),",
"Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST'])",
"get_failed_queue() job_ids = fq.job_ids count = len(job_ids) for job_id in",
"defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name,",
"methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job = Job.fetch(job_id) if rq_job.status ==",
"from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None),",
"float(per_page))) prev_page = None if current_page > 1: prev_page =",
"return dict([ (k,v) for k,v in input_dict.items() if v is",
"import render_template from rq import Queue, Worker from rq import",
"contains any jobs failed = Queue('failed') if not failed.is_empty(): queue",
"@jsonify def list_workers(): def serialize_queue_names(worker): return [q.name for q in",
"enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return dict([",
"Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues(): queues =",
") @dashboard.before_request def authentication_hook(): \"\"\" Allow the parent app to",
"Job from functools import wraps import times from flask import",
"str(e) from traceback import format_exc result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict)",
"flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name))",
"for q in queues] def serialize_date(dt): if dt is None:",
"[dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for q in queues] def serialize_date(dt):",
"None: # Show the failed queue by default if it",
"page): if queue_name is None: # Show the failed queue",
"port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn)",
"dict([ (k,v) for k,v in input_dict.items() if v is not",
"page=(current_page-1))) next_page = None if current_page < last_page: next_page =",
"1) results = all_pages if (window_size >= 1): pages_window_start =",
"last_page = int(ceil(total_items / float(per_page))) prev_page = None if current_page",
"worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker in Worker.all()]",
"= from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD',",
"url=url_for('.overview', queue_name=q.name)) for q in queues] def serialize_date(dt): if dt",
"jsonify(f): @wraps(f) def _wrapped(*args, **kwargs): from flask import jsonify as",
"if auth_handler and not auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if",
"@jsonify def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify",
"for job_id in job_ids: requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST'])",
"Worker from rq import cancel_job, requeue_job from rq import get_failed_queue",
"@dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if queue_name is None: # Show",
"is not None ]) def pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages",
"next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1))) pagination = remove_none_values( dict(pages_in_window=pages_in_window, next_page=next_page,",
"abort from flask import render_template from rq import Queue, Worker",
"for k,v in input_dict.items() if v is not None ])",
"offset = (current_page - 1) * per_page jobs = [serialize_job(job)",
"access to the dashboard with it's own auth_handler method that",
"functools import wraps import times from flask import Blueprint from",
"dt is None: return None return times.format(dt, 'UTC') def serialize_job(job):",
"= [ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for p in pages_numbers_in_window",
"job in queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify",
"Queue('failed') if not failed.is_empty(): queue = failed else: queue =",
"failed = Queue('failed') if not failed.is_empty(): queue = failed else:",
"except Exception as e: result_dict = dict(status='error') if current_app.config['DEBUG']: result_dict['reason']",
"None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None):",
"not auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn =",
"ceil dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', ) @dashboard.before_request def",
"def authentication_hook(): \"\"\" Allow the parent app to authenticate user's",
"jsonify as flask_jsonify try: result_dict = f(*args, **kwargs) except Exception",
"from rq.job import Job from functools import wraps import times",
"def list_workers(): def serialize_queue_names(worker): return [q.name for q in worker.queues]",
"requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def requeue_all():",
"if current_page > 1: prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page",
"def remove_none_values(input_dict): return dict([ (k,v) for k,v in input_dict.items() if",
"Redis from redis import from_url from rq import push_connection, pop_connection",
"queue.count pages_numbers_in_window = pagination_window(total_items, current_page, per_page) pages_in_window = [ dict(number=p,",
"dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page = None if current_page < last_page:",
"rq_job.delete() else: rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id):",
"@dashboard.route('/', defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def",
"@dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job = Job.fetch(job_id) if rq_job.status",
"queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs, pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers():",
"queue = Queue() else: queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(),",
"auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL'))",
"= Queue('failed') if not failed.is_empty(): queue = failed else: queue",
"@dashboard.route('/workers.json') @jsonify def list_workers(): def serialize_queue_names(worker): return [q.name for q",
"pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages = range(1, int(ceil(total_items / float(per_page)))",
"return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name): q = Queue(queue_name)",
"= [serialize_job(job) for job in queue.get_jobs(offset, per_page)] return dict(name=queue.name, jobs=jobs,",
"redis import Redis from redis import from_url from rq import",
"@dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name): q = Queue(queue_name) q.empty() return",
"@dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK') @dashboard.route('/requeue-all', methods=['GET',",
"1): pages_window_start = int(max(0, min(len(all_pages) - window_size, (cur_page-1) - ceil(window_size",
"if current_app.config['DEBUG']: result_dict['reason'] = str(e) from traceback import format_exc result_dict['exc_info']",
"[ dict(number=p, url=url_for('.overview', queue_name=queue_name, page=p)) for p in pages_numbers_in_window ]",
"(current_page - 1) * per_page jobs = [serialize_job(job) for job",
"= queue.count pages_numbers_in_window = pagination_window(total_items, current_page, per_page) pages_in_window = [",
"count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify def empty_queue(queue_name): q = Queue(queue_name) q.empty()",
"dict(pages_in_window=pages_in_window, next_page=next_page, prev_page=prev_page)) offset = (current_page - 1) * per_page",
"queue by default if it contains any jobs failed =",
"fq.job_ids count = len(job_ids) for job_id in job_ids: requeue_job(job_id) return",
"= Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify def compact_queue(queue_name):",
"else: queue = Queue(queue_name) return render_template('rq_dashboard/dashboard.html', workers=Worker.all(), queue=queue, page=page, queues=Queue.all(),",
"- ceil(window_size / 2.0)))) pages_window_end = int(pages_window_start + window_size) result",
"flask import Blueprint from flask import current_app, url_for, abort from",
"dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page): current_page = int(page) queue",
"from flask import current_app, url_for, abort from flask import render_template",
"page=p)) for p in pages_numbers_in_window ] last_page = int(ceil(total_items /",
"/ 2.0)))) pages_window_end = int(pages_window_start + window_size) result = all_pages[pages_window_start:pages_window_end]",
"@jsonify def list_jobs(queue_name, page): current_page = int(page) queue = Queue(queue_name)",
"pagination=pagination) @dashboard.route('/workers.json') @jsonify def list_workers(): def serialize_queue_names(worker): return [q.name for",
"next_page=next_page, prev_page=prev_page)) offset = (current_page - 1) * per_page jobs",
"None ]) def pagination_window(total_items, cur_page, per_page=5, window_size=10): all_pages = range(1,",
"= serialize_queues(sorted(Queue.all())) return dict(queues=queues) @dashboard.route('/jobs/<queue_name>/<page>.json') @jsonify def list_jobs(queue_name, page): current_page",
"queues=Queue.all(), rq_url_prefix=url_for('.overview')) @dashboard.route('/job/<job_id>/cancel', methods=['POST']) @jsonify def cancel_job_view(job_id): rq_job = Job.fetch(job_id)",
"queue_name=queue_name, page=p)) for p in pages_numbers_in_window ] last_page = int(ceil(total_items",
"\"failed\": rq_job.delete() else: rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def",
"= Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request",
"count=q.count, url=url_for('.overview', queue_name=q.name)) for q in queues] def serialize_date(dt): if",
"as e: result_dict = dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] = str(e)",
"__name__, template_folder='templates', static_folder='static', ) @dashboard.before_request def authentication_hook(): \"\"\" Allow the",
"import times from flask import Blueprint from flask import current_app,",
"from rq import Queue, Worker from rq import cancel_job, requeue_job",
"0)) @dashboard.before_request def push_rq_connection(): push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def",
"Exception as e: result_dict = dict(status='error') if current_app.config['DEBUG']: result_dict['reason'] =",
"'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB', 0)) @dashboard.before_request def push_rq_connection():",
"def setup_rq_connection(): if current_app.config.get('REDIS_URL'): current_app.redis_conn = from_url(current_app.config.get('REDIS_URL')) else: current_app.redis_conn =",
"_wrapped(*args, **kwargs): from flask import jsonify as flask_jsonify try: result_dict",
"dict(status='OK') @dashboard.route('/requeue-all', methods=['GET', 'POST']) @jsonify def requeue_all(): fq = get_failed_queue()",
"pop_connection() def jsonify(f): @wraps(f) def _wrapped(*args, **kwargs): from flask import",
"result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues): return",
"queues] def serialize_date(dt): if dt is None: return None return",
"= (current_page - 1) * per_page jobs = [serialize_job(job) for",
"def serialize_queue_names(worker): return [q.name for q in worker.queues] workers =",
"current_page > 1: prev_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page =",
"return flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues): return [dict(name=q.name, count=q.count, url=url_for('.overview',",
"= range(1, int(ceil(total_items / float(per_page))) + 1) results = all_pages",
"\"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not auth_handler(): abort(401)",
"current_page = int(page) queue = Queue(queue_name) per_page = 5 total_items",
"worker in Worker.all()] return dict(workers=workers) @dashboard.context_processor def inject_interval(): interval =",
"== \"failed\": rq_job.delete() else: rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify",
"fq = get_failed_queue() job_ids = fq.job_ids count = len(job_ids) for",
"def serialize_job(job): return dict( id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result,",
"float(per_page))) + 1) results = all_pages if (window_size >= 1):",
"failed queue by default if it contains any jobs failed",
"'1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'}) @dashboard.route('/<queue_name>/<page>') def overview(queue_name, page): if queue_name",
"**kwargs): from flask import jsonify as flask_jsonify try: result_dict =",
"app to authenticate user's access to the dashboard with it's",
"return result @dashboard.route('/', defaults={'queue_name': None, 'page': '1'}) @dashboard.route('/<queue_name>', defaults={'page': '1'})",
"empty_queue(queue_name): q = Queue(queue_name) q.empty() return dict(status='OK') @dashboard.route('/queue/<queue_name>/compact', methods=['POST']) @jsonify",
"+ 1) results = all_pages if (window_size >= 1): pages_window_start",
"5 total_items = queue.count pages_numbers_in_window = pagination_window(total_items, current_page, per_page) pages_in_window",
"format_exc result_dict['exc_info'] = format_exc() return flask_jsonify(**result_dict) return _wrapped def serialize_queues(queues):",
"job_id in job_ids: requeue_job(job_id) return dict(status='OK', count=count) @dashboard.route('/queue/<queue_name>/empty', methods=['POST']) @jsonify",
"created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return",
"= pagination_window(total_items, current_page, per_page) pages_in_window = [ dict(number=p, url=url_for('.overview', queue_name=queue_name,",
"must return True or False \"\"\" auth_handler = current_app.extensions['rq-dashboard'].auth_handler if",
"pop_connection from rq.job import Job from functools import wraps import",
"if rq_job.status == \"failed\": rq_job.delete() else: rq_job.cancel() return dict(status='OK') @dashboard.route('/job/<job_id>/requeue',",
"id=job.id, created_at=serialize_date(job.created_at), enqueued_at=serialize_date(job.enqueued_at), ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict):",
"url=url_for('.overview', queue_name=queue_name, page=p)) for p in pages_numbers_in_window ] last_page =",
"in queues] def serialize_date(dt): if dt is None: return None",
"total_items = queue.count pages_numbers_in_window = pagination_window(total_items, current_page, per_page) pages_in_window =",
"q in worker.queues] workers = [dict(name=worker.name, queues=serialize_queue_names(worker), state=worker.get_state()) for worker",
"rq_job = Job.fetch(job_id) if rq_job.status == \"failed\": rq_job.delete() else: rq_job.cancel()",
"dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues(): queues = serialize_queues(sorted(Queue.all())) return dict(queues=queues)",
"if (window_size >= 1): pages_window_start = int(max(0, min(len(all_pages) - window_size,",
"is None: # Show the failed queue by default if",
"ended_at=serialize_date(job.ended_at), origin=job.origin, result=job._result, exc_info=job.exc_info, description=job.description) def remove_none_values(input_dict): return dict([ (k,v)",
"push_connection(current_app.redis_conn) @dashboard.teardown_request def pop_rq_connection(exception=None): pop_connection() def jsonify(f): @wraps(f) def _wrapped(*args,",
"per_page jobs = [serialize_job(job) for job in queue.get_jobs(offset, per_page)] return",
"= f(*args, **kwargs) except Exception as e: result_dict = dict(status='error')",
"pages_window_end = int(pages_window_start + window_size) result = all_pages[pages_window_start:pages_window_end] return result",
"int(ceil(total_items / float(per_page))) prev_page = None if current_page > 1:",
"else: current_app.redis_conn = Redis(host=current_app.config.get('REDIS_HOST', 'localhost'), port=current_app.config.get('REDIS_PORT', 6379), password=current_app.config.get('REDIS_PASSWORD', None), db=current_app.config.get('REDIS_DB',",
"from flask import jsonify as flask_jsonify try: result_dict = f(*args,",
"get_failed_queue from math import ceil dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates',",
"queue = Queue(queue_name) per_page = 5 total_items = queue.count pages_numbers_in_window",
"return [dict(name=q.name, count=q.count, url=url_for('.overview', queue_name=q.name)) for q in queues] def",
"next_page = None if current_page < last_page: next_page = dict(url=url_for('.overview',",
"redis import from_url from rq import push_connection, pop_connection from rq.job",
"q = Queue(queue_name) q.compact() return dict(status='OK') @dashboard.route('/queues.json') @jsonify def list_queues():",
"current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not auth_handler(): abort(401) @dashboard.before_app_first_request def setup_rq_connection():",
"to authenticate user's access to the dashboard with it's own",
"queues=serialize_queue_names(worker), state=worker.get_state()) for worker in Worker.all()] return dict(workers=workers) @dashboard.context_processor def",
"import push_connection, pop_connection from rq.job import Job from functools import",
"import ceil dashboard = Blueprint('rq_dashboard', __name__, template_folder='templates', static_folder='static', ) @dashboard.before_request",
"= dict(url=url_for('.overview', queue_name=queue_name, page=(current_page-1))) next_page = None if current_page <",
"auth_handler = current_app.extensions['rq-dashboard'].auth_handler if auth_handler and not auth_handler(): abort(401) @dashboard.before_app_first_request",
"list_jobs(queue_name, page): current_page = int(page) queue = Queue(queue_name) per_page =",
"= 5 total_items = queue.count pages_numbers_in_window = pagination_window(total_items, current_page, per_page)",
"return dict(status='OK') @dashboard.route('/job/<job_id>/requeue', methods=['POST']) @jsonify def requeue_job_view(job_id): requeue_job(job_id) return dict(status='OK')",
"None if current_page < last_page: next_page = dict(url=url_for('.overview', queue_name=queue_name, page=(current_page+1)))",
"queue_name=queue_name, page=(current_page-1))) next_page = None if current_page < last_page: next_page"
]
[
"from acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener import AcquisitionListener __all__ =",
"AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep from",
"import AcquisitionArchiveStep from acquisition.listener import AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep',",
"AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep from",
"acquisition.listener import AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep', 'AcquisitionReinjectStep',",
"from acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step",
"import AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep",
"from acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener",
"import AcquisitionStep from acquisition.stats import AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep",
"acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step import",
"acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step import",
"from acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step",
"import AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep",
"from acquisition.stats import AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step",
"AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep', 'AcquisitionReinjectStep', 'AcquisitionForkStep', 'AcquisitionArchiveStep',",
"acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener import AcquisitionListener __all__ = ['AcquisitionStep',",
"import AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep', 'AcquisitionReinjectStep', 'AcquisitionForkStep',",
"from acquisition.step import AcquisitionStep from acquisition.stats import AcquisitionStatsDClient from acquisition.move_step",
"from acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step",
"acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step import",
"import AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener import AcquisitionListener",
"import AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep",
"acquisition.step import AcquisitionStep from acquisition.stats import AcquisitionStatsDClient from acquisition.move_step import",
"AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep from",
"AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener import AcquisitionListener __all__",
"from acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step",
"acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep from acquisition.listener import",
"AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep from",
"acquisition.stats import AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep from acquisition.delete_step import",
"AcquisitionArchiveStep from acquisition.listener import AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep',",
"acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep from acquisition.reinject_step import",
"from acquisition.listener import AcquisitionListener __all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep',",
"AcquisitionStep from acquisition.stats import AcquisitionStatsDClient from acquisition.move_step import AcquisitionMoveStep from",
"import AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep from acquisition.archive_step import AcquisitionArchiveStep",
"__all__ = ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep', 'AcquisitionReinjectStep', 'AcquisitionForkStep', 'AcquisitionArchiveStep', 'AcquisitionStatsDClient',",
"= ['AcquisitionStep', 'AcquisitionBatchStep', 'AcquisitionMoveStep', 'AcquisitionDeleteStep', 'AcquisitionReinjectStep', 'AcquisitionForkStep', 'AcquisitionArchiveStep', 'AcquisitionStatsDClient', 'AcquisitionListener']",
"import AcquisitionBatchStep from acquisition.reinject_step import AcquisitionReinjectStep from acquisition.fork_step import AcquisitionForkStep",
"AcquisitionMoveStep from acquisition.delete_step import AcquisitionDeleteStep from acquisition.batch_step import AcquisitionBatchStep from"
]
[
"self.validate_email_type(self.email) def validate_email_type(self, email): from frappe.utils import validate_email_add validate_email_add(email.strip(), True)",
"-*- coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies",
"def onload(self): \"\"\"Load address and contacts in `__onload`\"\"\" load_address_and_contact(self) def",
"Member(Document): def onload(self): \"\"\"Load address and contacts in `__onload`\"\"\" load_address_and_contact(self)",
"For license information, please see license.txt from __future__ import unicode_literals",
"# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors",
"load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def validate_email_type(self, email): from frappe.utils import",
"Pvt. Ltd. and contributors # For license information, please see",
"\"\"\"Load address and contacts in `__onload`\"\"\" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email)",
"license information, please see license.txt from __future__ import unicode_literals from",
"(c) 2017, Frappe Technologies Pvt. Ltd. and contributors # For",
"please see license.txt from __future__ import unicode_literals from frappe.model.document import",
"from __future__ import unicode_literals from frappe.model.document import Document from frappe.contacts.address_and_contact",
"and contacts in `__onload`\"\"\" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def validate_email_type(self,",
"in `__onload`\"\"\" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def validate_email_type(self, email): from",
"license.txt from __future__ import unicode_literals from frappe.model.document import Document from",
"Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors #",
"Technologies Pvt. Ltd. and contributors # For license information, please",
"contacts in `__onload`\"\"\" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def validate_email_type(self, email):",
"Document from frappe.contacts.address_and_contact import load_address_and_contact class Member(Document): def onload(self): \"\"\"Load",
"unicode_literals from frappe.model.document import Document from frappe.contacts.address_and_contact import load_address_and_contact class",
"and contributors # For license information, please see license.txt from",
"def validate(self): self.validate_email_type(self.email) def validate_email_type(self, email): from frappe.utils import validate_email_add",
"Ltd. and contributors # For license information, please see license.txt",
"validate(self): self.validate_email_type(self.email) def validate_email_type(self, email): from frappe.utils import validate_email_add validate_email_add(email.strip(),",
"-*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and",
"__future__ import unicode_literals from frappe.model.document import Document from frappe.contacts.address_and_contact import",
"address and contacts in `__onload`\"\"\" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def",
"import load_address_and_contact class Member(Document): def onload(self): \"\"\"Load address and contacts",
"load_address_and_contact class Member(Document): def onload(self): \"\"\"Load address and contacts in",
"utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt. Ltd.",
"onload(self): \"\"\"Load address and contacts in `__onload`\"\"\" load_address_and_contact(self) def validate(self):",
"from frappe.contacts.address_and_contact import load_address_and_contact class Member(Document): def onload(self): \"\"\"Load address",
"# For license information, please see license.txt from __future__ import",
"contributors # For license information, please see license.txt from __future__",
"from frappe.model.document import Document from frappe.contacts.address_and_contact import load_address_and_contact class Member(Document):",
"frappe.model.document import Document from frappe.contacts.address_and_contact import load_address_and_contact class Member(Document): def",
"see license.txt from __future__ import unicode_literals from frappe.model.document import Document",
"# -*- coding: utf-8 -*- # Copyright (c) 2017, Frappe",
"information, please see license.txt from __future__ import unicode_literals from frappe.model.document",
"import Document from frappe.contacts.address_and_contact import load_address_and_contact class Member(Document): def onload(self):",
"`__onload`\"\"\" load_address_and_contact(self) def validate(self): self.validate_email_type(self.email) def validate_email_type(self, email): from frappe.utils",
"class Member(Document): def onload(self): \"\"\"Load address and contacts in `__onload`\"\"\"",
"2017, Frappe Technologies Pvt. Ltd. and contributors # For license",
"coding: utf-8 -*- # Copyright (c) 2017, Frappe Technologies Pvt.",
"import unicode_literals from frappe.model.document import Document from frappe.contacts.address_and_contact import load_address_and_contact",
"Frappe Technologies Pvt. Ltd. and contributors # For license information,",
"frappe.contacts.address_and_contact import load_address_and_contact class Member(Document): def onload(self): \"\"\"Load address and"
]
[
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False))",
"nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs)",
"ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3,",
"=\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(),",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\"",
"self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"= int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork):",
"super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(),",
"FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x =",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork):",
"super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape)",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x =",
"self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile()",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape)",
"BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False))",
"BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\"",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x =",
"class FCNet(BaseN.BaseNetwork): name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape",
"nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\"",
"FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model",
"3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"import CholeskyBlock class FCNet(BaseN.BaseNetwork): name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x",
"class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def",
"8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),",
"nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(),",
"FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4,",
"nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork):",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork):",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))]",
"ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8,",
"int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\"",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False))",
"= nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x =",
"nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork):",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork):",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x =",
"BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())]",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\"",
"nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name)",
"class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(),",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(),",
"ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape",
"name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]",
"nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv",
"512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def",
"nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork):",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x),",
"name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model =",
"= nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork):",
"import base.basenetwork as BaseN from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork):",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile()",
"x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(),",
"nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(),",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0]))",
"nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0]))",
"BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"numpy as np import base.basenetwork as BaseN from networks.cholesky import",
"16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name",
"self.compile() class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x =",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile()",
"BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"name = \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x =",
"nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork):",
"= input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(),",
"512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\"",
"\"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32),",
"8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer())",
"ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8,",
"512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"= nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class",
"nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs):",
"nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name)",
"512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(),",
"FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model",
"name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x =",
"= input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0]))",
"BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x",
"self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(),",
"nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name =",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8,",
"512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape))",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class",
"nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def",
"8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape)",
"self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def",
"as BaseN from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name =\"FCNet\"",
"self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape)",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))]",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile()",
"= nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork):",
"output_shape self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16,",
"as np import base.basenetwork as BaseN from networks.cholesky import CholeskyBlock",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\"",
"8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),",
"super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x =",
"x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(),",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512),",
"name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model =",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"self.compile() class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x =",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x),",
"BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"=\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(),",
"BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x),",
"nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape)",
"torch import nn import numpy as np import base.basenetwork as",
"nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def",
"self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile()",
"nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name)",
"nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name)",
"class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256),",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(),",
"base.basenetwork as BaseN from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name",
"4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),",
"BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x",
"nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class",
"class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape",
"from torch import nn import numpy as np import base.basenetwork",
"super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x",
"nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256),",
"16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork):",
"1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\"",
"nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork):",
"BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model =",
"self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\"",
"name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())]",
"nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv",
"nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\"",
"BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name)",
"CholeskyBlock class FCNet(BaseN.BaseNetwork): name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x =",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))]",
"20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(),",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024),",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\"",
"networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name)",
"nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\"",
"class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(),",
"nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name",
"nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0]))",
"nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name)",
"= input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0]))",
"input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile()",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6,",
"nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model =",
"self.compile() class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x =",
"x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(),",
"512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name)",
"self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n",
"nn import numpy as np import base.basenetwork as BaseN from",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class",
"ConvNetBig(BaseN.BaseNetwork): name=\"ConvNetBig\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8,",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False))",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(),",
"self.compile() class ConvNet2(BaseN.BaseNetwork): name=\"ConvNet2\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8,",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork):",
"nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name)",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class",
"super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(),",
"input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile()",
"BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\"",
"class ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n =",
"self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4,",
"nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile()",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16),",
"=\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(),",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))]",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x",
"FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model",
"input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile()",
"self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),",
"BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x",
"= \"ConvNetMNIST\" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv =",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork):",
"name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model =",
"name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model =",
"super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16,",
"nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile()",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x",
"self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def",
"nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name)",
"class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(),",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256),",
"self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class",
"nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x =",
"[BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(),",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork):",
"FCNet(BaseN.BaseNetwork): name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model",
"BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4,",
"super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x",
"super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape)",
"super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile()",
"nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv",
"nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = \"ConvNetMNIST\" def",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class",
"class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(),",
"def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)]",
"nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv",
"nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv",
"1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def",
"self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0],",
"nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x =",
"import numpy as np import base.basenetwork as BaseN from networks.cholesky",
"class FCSpectralMNet(BaseN.BaseNetwork): name =\"FCSpectralMNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape",
"x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(),",
"BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(),",
"super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(),",
"=\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(),",
"class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(),",
"BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape)",
"ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8,",
"__init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x",
"np import base.basenetwork as BaseN from networks.cholesky import CholeskyBlock class",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\"",
"8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape)",
"x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(),",
"x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256),",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0]))",
"nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name)",
"nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name =\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name)",
"= nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class",
"<reponame>ayyuriss/TRHPO from torch import nn import numpy as np import",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"= input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0]))",
"super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))]",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile()",
"BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0]))",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile()",
"nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name)",
"from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name =\"FCNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512),",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\"",
"BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0],",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12,",
"BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16,",
"name=\"FCConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())]",
"nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name=\"ConvNetBigAtari\" def",
"=\"FCNetQ\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(),",
"16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x),",
"BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv =",
"name=\"ConvNetBigS\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),",
"import nn import numpy as np import base.basenetwork as BaseN",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile()",
"[nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape)",
"= output_shape self.conv = [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256),",
"nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name=\"ConvNetBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class",
"name=\"ConvNetBigAtari\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),",
"self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x =",
"512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name=\"ConvNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name)",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))]",
"nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model",
"= BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0]))",
"x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512),",
"= nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(),",
"x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(),",
"= [BaseN.ResNetBlock(1,32), BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(),",
"512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name=\"ConvNetBigBias\" def __init__(self,input_shape,output_shape,owner_name=\"\"):",
"nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name =\"FCSpectralNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name)",
"class FCNetSimple(BaseN.BaseNetwork): name =\"FCNetSimple\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape",
"32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),",
"BaseN from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name =\"FCNet\" def",
"4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(),",
"class ConvNet(BaseN.BaseNetwork): name=\"ConvNet\" def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x",
"= [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape)",
"super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x",
"BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x),",
"BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x),",
"BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name=\"FCConvNet\" def",
"__init__(self,input_shape,output_shape,owner_name=\"\"): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0]))",
"self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile()",
"12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),",
"def __init__(self,input_shape,output_shape,owner_name=\"\"): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16,"
] |
[
") @app.post( \"/v1/important_words\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, )",
"@app.post( \"/v1/parse\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, deprecated=True, )",
"using alias - splitting up into two input models class",
"typing import List from .similar import important_words from .server import",
"words\", response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*, data: ParseInputDeprecated): logger.info(f\"parse: {repr(data.input_string)}\")",
"description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, ) def v1_important_words(*, data:",
"class ImportantWordsResponse(BaseModel): important_words: List[str] = Field(..., description=\"List of lemmas\") class",
"input_string: str = Field( ..., description=\"Icelandic text for analysis.\", min_length=1,",
"description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, ) # Strange things",
"..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, ) # Strange",
"import app _MAX_LENGTH = 2000 logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel):",
"response_model=ImportantWordsResponse, ) def v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\",",
"class ParseInputDeprecated(BaseModel): input_string: str = Field( ..., description=\"Icelandic text for",
"models class ParseInputDeprecated(BaseModel): input_string: str = Field( ..., description=\"Icelandic text",
"@app.post( \"/v1/important_words\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, ) def",
") # Strange things happen with error handling when using",
"text for analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\", ) @app.post( \"/v1/important_words\", description=\"Find",
"handling when using alias - splitting up into two input",
"two input models class ParseInputDeprecated(BaseModel): input_string: str = Field( ...,",
"logging from pydantic import BaseModel, Field from typing import List",
"ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, deprecated=True,",
"lemmas of important words\", response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*, data:",
"import logging from pydantic import BaseModel, Field from typing import",
"import List from .similar import important_words from .server import app",
"= Field( ..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\",",
"analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\", ) @app.post( \"/v1/important_words\", description=\"Find lemmas of",
"ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find lemmas of important words\",",
"v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find lemmas of",
"from pydantic import BaseModel, Field from typing import List from",
"import important_words from .server import app _MAX_LENGTH = 2000 logger",
"ImportantWordsResponse(BaseModel): important_words: List[str] = Field(..., description=\"List of lemmas\") class ImportantWordsRequest(BaseModel):",
".similar import important_words from .server import app _MAX_LENGTH = 2000",
"= Field(..., description=\"List of lemmas\") class ImportantWordsRequest(BaseModel): input_string: str =",
"Field(..., description=\"List of lemmas\") class ImportantWordsRequest(BaseModel): input_string: str = Field(",
".server import app _MAX_LENGTH = 2000 logger = logging.getLogger(__name__) class",
"when using alias - splitting up into two input models",
"from .server import app _MAX_LENGTH = 2000 logger = logging.getLogger(__name__)",
"important words\", response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*, data: ParseInputDeprecated): logger.info(f\"parse:",
"from .similar import important_words from .server import app _MAX_LENGTH =",
"max_length=_MAX_LENGTH, ) # Strange things happen with error handling when",
"for analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\", ) @app.post( \"/v1/important_words\", description=\"Find lemmas",
"Field( ..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, ) #",
"= Field( ..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, )",
"text for analysis.\", min_length=1, max_length=_MAX_LENGTH, ) # Strange things happen",
"ImportantWordsRequest(BaseModel): input_string: str = Field( ..., description=\"Icelandic text for analysis.\",",
"with error handling when using alias - splitting up into",
"str = Field( ..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH,",
"description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*,",
"\"/v1/parse\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, deprecated=True, ) def",
"= logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str] = Field(..., description=\"List of",
"def v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find lemmas",
"response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*, data: ParseInputDeprecated): logger.info(f\"parse: {repr(data.input_string)}\") return",
"error handling when using alias - splitting up into two",
"things happen with error handling when using alias - splitting",
"of important words\", response_model=ImportantWordsResponse, ) def v1_important_words(*, data: ImportantWordsRequest): return",
"words\", response_model=ImportantWordsResponse, ) def v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post(",
"deprecated=True, ) def v1_parse(*, data: ParseInputDeprecated): logger.info(f\"parse: {repr(data.input_string)}\") return ImportantWordsResponse(important_words=important_words(data.input_string))",
"important_words: List[str] = Field(..., description=\"List of lemmas\") class ImportantWordsRequest(BaseModel): input_string:",
"lemmas\") class ImportantWordsRequest(BaseModel): input_string: str = Field( ..., description=\"Icelandic text",
"analysis.\", min_length=1, max_length=_MAX_LENGTH, ) # Strange things happen with error",
"app _MAX_LENGTH = 2000 logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words:",
"input models class ParseInputDeprecated(BaseModel): input_string: str = Field( ..., description=\"Icelandic",
"Field from typing import List from .similar import important_words from",
"\"/v1/important_words\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse, ) def v1_important_words(*,",
"ParseInputDeprecated(BaseModel): input_string: str = Field( ..., description=\"Icelandic text for analysis.\",",
"2000 logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str] = Field(...,",
"description=\"List of lemmas\") class ImportantWordsRequest(BaseModel): input_string: str = Field( ...,",
"of important words\", response_model=ImportantWordsResponse, deprecated=True, ) def v1_parse(*, data: ParseInputDeprecated):",
"description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\", ) @app.post( \"/v1/important_words\",",
"= 2000 logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str] =",
"for analysis.\", min_length=1, max_length=_MAX_LENGTH, ) # Strange things happen with",
"of lemmas\") class ImportantWordsRequest(BaseModel): input_string: str = Field( ..., description=\"Icelandic",
"# Strange things happen with error handling when using alias",
"splitting up into two input models class ParseInputDeprecated(BaseModel): input_string: str",
"BaseModel, Field from typing import List from .similar import important_words",
"important_words from .server import app _MAX_LENGTH = 2000 logger =",
"- splitting up into two input models class ParseInputDeprecated(BaseModel): input_string:",
"up into two input models class ParseInputDeprecated(BaseModel): input_string: str =",
"logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str] = Field(..., description=\"List of lemmas\")",
"import BaseModel, Field from typing import List from .similar import",
"Field( ..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\", )",
"return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse,",
") def v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find",
"max_length=_MAX_LENGTH, alias=\"in\", ) @app.post( \"/v1/important_words\", description=\"Find lemmas of important words\",",
"min_length=1, max_length=_MAX_LENGTH, alias=\"in\", ) @app.post( \"/v1/important_words\", description=\"Find lemmas of important",
"_MAX_LENGTH = 2000 logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str]",
"Strange things happen with error handling when using alias -",
"..., description=\"Icelandic text for analysis.\", min_length=1, max_length=_MAX_LENGTH, alias=\"in\", ) @app.post(",
"class ImportantWordsRequest(BaseModel): input_string: str = Field( ..., description=\"Icelandic text for",
"logger = logging.getLogger(__name__) class ImportantWordsResponse(BaseModel): important_words: List[str] = Field(..., description=\"List",
"from typing import List from .similar import important_words from .server",
"pydantic import BaseModel, Field from typing import List from .similar",
"List[str] = Field(..., description=\"List of lemmas\") class ImportantWordsRequest(BaseModel): input_string: str",
"List from .similar import important_words from .server import app _MAX_LENGTH",
"into two input models class ParseInputDeprecated(BaseModel): input_string: str = Field(",
"data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string)) @app.post( \"/v1/parse\", description=\"Find lemmas of important",
"alias - splitting up into two input models class ParseInputDeprecated(BaseModel):",
"important words\", response_model=ImportantWordsResponse, ) def v1_important_words(*, data: ImportantWordsRequest): return ImportantWordsResponse(important_words=important_words(data.input_string))",
"alias=\"in\", ) @app.post( \"/v1/important_words\", description=\"Find lemmas of important words\", response_model=ImportantWordsResponse,",
"min_length=1, max_length=_MAX_LENGTH, ) # Strange things happen with error handling",
"lemmas of important words\", response_model=ImportantWordsResponse, ) def v1_important_words(*, data: ImportantWordsRequest):",
"happen with error handling when using alias - splitting up"
] |
[
"# num = 10 / 0 number = int(input(\"Enter a",
"0 number = int(input(\"Enter a number: \")) print(number) # catch",
"a number: \")) print(number) # catch specific errors except ZeroDivisionError",
"specific errors except ZeroDivisionError as err: print(err) except ValueError: print(\"Invalid",
"number: \")) print(number) # catch specific errors except ZeroDivisionError as",
"= 10 / 0 number = int(input(\"Enter a number: \"))",
"int(input(\"Enter a number: \")) print(number) # catch specific errors except",
"number = int(input(\"Enter a number: \")) print(number) # catch specific",
"\")) print(number) # catch specific errors except ZeroDivisionError as err:",
"catch specific errors except ZeroDivisionError as err: print(err) except ValueError:",
"try: # num = 10 / 0 number = int(input(\"Enter",
"10 / 0 number = int(input(\"Enter a number: \")) print(number)",
"<reponame>kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial-<filename>try-except.py try: # num = 10 / 0 number =",
"num = 10 / 0 number = int(input(\"Enter a number:",
"# catch specific errors except ZeroDivisionError as err: print(err) except",
"print(number) # catch specific errors except ZeroDivisionError as err: print(err)",
"/ 0 number = int(input(\"Enter a number: \")) print(number) #",
"errors except ZeroDivisionError as err: print(err) except ValueError: print(\"Invalid input\")",
"= int(input(\"Enter a number: \")) print(number) # catch specific errors"
] |
[
"outside the provided values, default 0. Return ------ z :",
"t, yout) a = np.linalg.solve(y, z) return np.squeeze(a, -1) def",
"for each peak. t : int array (..., N,) The",
"np.squeeze(a, -1) def sumpeaks(y, a, t, yout=0): \"\"\" Compute the",
"-1) eps = np.finfo(float).eps * N * N * ampl",
"of the signals (`y` is multiplied by `a`). t :",
"sumpeaks. \"\"\" y = np.asarray(y) z = np.asarray(z) t =",
"np.pad(y, [(0, 0)] * (y.ndim - 1) + [(1, 1)],",
"= np.asarray(y) z = np.asarray(z) t = np.asarray(t) y =",
"n = max(y.ndim, indices.ndim) y, indices = _adddims(y, indices) y",
"+ [(1, 1)], constant_values=yout) offset = np.argmax(np.abs(y), axis=-1) ampl =",
"positions even when they are summed. Parameters ---------- y :",
"N,) The indices of the peaks in the sum. yout",
"(`y` is multiplied by `a`). t : int array (...,",
"= np.expand_dims(a, tuple(range(n - a.ndim))) b = np.expand_dims(b, tuple(range(n -",
"Compute the peak heights of a sum of signals. This",
"each signal. Broadcasted along non-last axis. \"\"\" y, a =",
"from scipy import signal y = np.exp(-np.linspace(0, 10, 1000) /",
"== len(t0) a = peaksampl(y, z[t], t) h = sumpeaks(y,",
"scipy import signal y = np.exp(-np.linspace(0, 10, 1000) / 10)",
"| (indices >= len(y)), 0, z) z = np.sum(z, axis=0)",
"np.minimum(indices, y.shape[-1] - 1) indices = np.maximum(indices, 0) N =",
"(..., M,) The single signal shape. z : array (...,",
"yout : scalar The value of the signal outside the",
"the provided values, default 0. Return ------ z : array",
"by the signal positions even when they are summed. Parameters",
"a, t, yout) z = np.matmul(y, a) return np.squeeze(z, axis=-1)",
"array (..., N,) The peak height in the sum of",
"height in the sum of the signals for each signal.",
"sumpeaks(y, a, t) fig, ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55')",
"N), The amplitudes such that z_i = sum_j a_j *",
"ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl = np.squeeze(ampl, -1)",
"\"\"\" Get peak amplitudes given their sum. This assumes that",
"(..., N,) The indices of the peaks in the sum.",
"ax.plot(z, color='#f55') ax.vlines(t0, 0, a0, color='gray', zorder=3) ax.vlines(t, 0, a,",
"np.take(y, indices, mode='clip') * a0[:, None] z = np.where((indices <",
"0. Return ------ z : array (..., N,) The peak",
"the signals for each peak. t : int array (...,",
"yout): \"\"\" Shared implementation of peaksampl and sumpeaks. \"\"\" y",
"t) fig, ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0,",
"of the peaks is given by the signal positions even",
"signal.argrelmax(z) assert len(t) == len(t0) a = peaksampl(y, z[t], t)",
"z_i = sum_j a_j * y[t_i - t_j]. Broadcasted along",
"when they are summed. Parameters ---------- y : array (...,",
"1)], constant_values=yout) offset = np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset,",
"of the signals. yout : scalar The value of the",
"np.asarray(t) y = np.pad(y, [(0, 0)] * (y.ndim - 1)",
"Get peak amplitudes given their sum. This assumes that the",
"y = np.take_along_axis(y, indices, -1) eps = np.finfo(float).eps * N",
"y = np.asarray(y) z = np.asarray(z) t = np.asarray(t) y",
"np.where((indices < 0) | (indices >= len(y)), 0, z) z",
": array (..., M,) The single signal shape. z :",
"def _adddims(a, b): n = max(a.ndim, b.ndim) a = np.expand_dims(a,",
"given by the signal positions even when they are summed.",
"None] z = np.where((indices < 0) | (indices >= len(y)),",
"= np.expand_dims(b, tuple(range(n - b.ndim))) return a, b def _yz(y,",
"= _adddims(y, indices) y = np.take_along_axis(y, indices, -1) eps =",
"the position of the peaks is given by the signal",
"signals (`y` is multiplied by `a`). t : int array",
"np.exp(-np.linspace(0, 10, 1000) / 10) i = np.arange(1, 1000) t0",
"__name__ == '__main__': from matplotlib import pyplot as plt from",
"[(1, 1)], constant_values=yout) offset = np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y,",
"even when they are summed. Parameters ---------- y : array",
"default 0. Return ------ z : array (..., N,) The",
"z = _adddims(y, z) return y, z def peaksampl(y, z,",
"z = np.matmul(y, a) return np.squeeze(z, axis=-1) if __name__ ==",
"position of the signals is given by peaks positions even",
"= np.squeeze(ampl, -1) indices = t[..., :, None] - t[...,",
"yout) a = np.linalg.solve(y, z) return np.squeeze(a, -1) def sumpeaks(y,",
"a0, color='gray', zorder=3) ax.vlines(t, 0, a, linestyle='--', zorder=3) ax.plot(t, h,",
"np.squeeze(z, axis=-1) if __name__ == '__main__': from matplotlib import pyplot",
"\"\"\" y, a = _yz(y, a, t, yout) z =",
"z, t, yout) a = np.linalg.solve(y, z) return np.squeeze(a, -1)",
"axis=0) t, = signal.argrelmax(z) assert len(t) == len(t0) a =",
"array (..., M,) The single signal shape. z : array",
"peaks positions even when they are summed. Parameters ---------- y",
"* ampl y[..., ::N + 1] += np.expand_dims(eps, -1) y",
"are summed. Parameters ---------- y : array (..., M,) The",
"N,) The amplitudes of the signals (`y` is multiplied by",
"of peaksampl and sumpeaks. \"\"\" y = np.asarray(y) z =",
"peak. t : int array (..., N,) The indices of",
"N,) The peak height in the sum of the signals",
"- 1) indices = np.maximum(indices, 0) N = t.shape[-1] indices",
"for each signal. Broadcasted along non-last axis. \"\"\" y, a",
"y = y.reshape(y.shape[:-1] + (N, N)) z = z[..., None]",
"N,) The indices of the position of the signals. yout",
"None] - t[..., None, :] + offset[..., None, None] indices",
"amplitudes given their sum. This assumes that the position of",
"a.ndim))) b = np.expand_dims(b, tuple(range(n - b.ndim))) return a, b",
"t, yout): \"\"\" Shared implementation of peaksampl and sumpeaks. \"\"\"",
"and sumpeaks. \"\"\" y = np.asarray(y) z = np.asarray(z) t",
"np.take_along_axis(y, indices, -1) eps = np.finfo(float).eps * N * N",
"signal outside the provided values, default 0. Return ------ a",
"N)) z = z[..., None] y, z = _adddims(y, z)",
"sum of signals. This assumes that the position of the",
"y, a = _yz(y, a, t, yout) z = np.matmul(y,",
"The amplitudes of the signals (`y` is multiplied by `a`).",
"z, t, yout=0): \"\"\" Get peak amplitudes given their sum.",
": int array (..., N,) The indices of the position",
"is given by the signal positions even when they are",
"Shared implementation of peaksampl and sumpeaks. \"\"\" y = np.asarray(y)",
"the sum of the signals for each signal. Broadcasted along",
": array (..., N), The amplitudes such that z_i =",
"np.linalg.solve(y, z) return np.squeeze(a, -1) def sumpeaks(y, a, t, yout=0):",
"= np.finfo(float).eps * N * N * ampl y[..., ::N",
"each peak. t : int array (..., N,) The indices",
"= z[..., None] y, z = _adddims(y, z) return y,",
"None] indices = np.minimum(indices, y.shape[-1] - 1) indices = np.maximum(indices,",
"= _adddims(y, z) return y, z def peaksampl(y, z, t,",
"len(y)), 0, z) z = np.sum(z, axis=0) t, = signal.argrelmax(z)",
"= _yz(y, z, t, yout) a = np.linalg.solve(y, z) return",
"y, z def peaksampl(y, z, t, yout=0): \"\"\" Get peak",
"axis. \"\"\" y, z = _yz(y, z, t, yout) a",
"values, default 0. Return ------ a : array (..., N),",
"indices of the peaks in the sum. yout : scalar",
"_adddims(y, indices) y = np.take_along_axis(y, indices, -1) eps = np.finfo(float).eps",
"return np.squeeze(a, -1) def sumpeaks(y, a, t, yout=0): \"\"\" Compute",
"numpy as np def _adddims(a, b): n = max(a.ndim, b.ndim)",
"indices, -1) eps = np.finfo(float).eps * N * N *",
"assumes that the position of the signals is given by",
"indices = indices.reshape(indices.shape[:-2] + (N * N,)) n = max(y.ndim,",
"* N * N * ampl y[..., ::N + 1]",
"indices = _adddims(y, indices) y = np.take_along_axis(y, indices, -1) eps",
"np.sum(z, axis=0) t, = signal.argrelmax(z) assert len(t) == len(t0) a",
"np.expand_dims(a, tuple(range(n - a.ndim))) b = np.expand_dims(b, tuple(range(n - b.ndim)))",
"signal outside the provided values, default 0. Return ------ z",
"= t.shape[-1] indices = indices.reshape(indices.shape[:-2] + (N * N,)) n",
"y[t_i - t_j]. Broadcasted along non-last axis. \"\"\" y, z",
"b.ndim) a = np.expand_dims(a, tuple(range(n - a.ndim))) b = np.expand_dims(b,",
"* a0[:, None] z = np.where((indices < 0) | (indices",
"= np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl = np.squeeze(ampl, -1) indices",
"given their sum. This assumes that the position of the",
"\"\"\" y, z = _yz(y, z, t, yout) a =",
"- t[..., None, :] + offset[..., None, None] indices =",
"array (..., N), The amplitudes such that z_i = sum_j",
"t : int array (..., N,) The indices of the",
"max(a.ndim, b.ndim) a = np.expand_dims(a, tuple(range(n - a.ndim))) b =",
"sum. yout : scalar The value of the signal outside",
"-1) y = y.reshape(y.shape[:-1] + (N, N)) z = z[...,",
"This assumes that the position of the signals is given",
"= sum_j a_j * y[t_i - t_j]. Broadcasted along non-last",
"np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl =",
"the sum. yout : scalar The value of the signal",
"indices) y = np.take_along_axis(y, indices, -1) eps = np.finfo(float).eps *",
"a : array (..., N,) The amplitudes of the signals",
"a = np.expand_dims(a, tuple(range(n - a.ndim))) b = np.expand_dims(b, tuple(range(n",
"* (y.ndim - 1) + [(1, 1)], constant_values=yout) offset =",
"peak height in the sum of the signals for each",
"t0 = np.array([10, 340, 523]) a0 = np.array([3, 2, 1])",
"color='gray', zorder=3) ax.vlines(t, 0, a, linestyle='--', zorder=3) ax.plot(t, h, 'ok')",
"b def _yz(y, z, t, yout): \"\"\" Shared implementation of",
"_yz(y, a, t, yout) z = np.matmul(y, a) return np.squeeze(z,",
"yout=0): \"\"\" Compute the peak heights of a sum of",
"position of the peaks is given by the signal positions",
"_yz(y, z, t, yout) a = np.linalg.solve(y, z) return np.squeeze(a,",
"eps = np.finfo(float).eps * N * N * ampl y[...,",
"= plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0, a0, color='gray', zorder=3)",
"(y.ndim - 1) + [(1, 1)], constant_values=yout) offset = np.argmax(np.abs(y),",
"= peaksampl(y, z[t], t) h = sumpeaks(y, a, t) fig,",
"assert len(t) == len(t0) a = peaksampl(y, z[t], t) h",
"np.expand_dims(offset, -1), -1) ampl = np.squeeze(ampl, -1) indices = t[...,",
"implementation of peaksampl and sumpeaks. \"\"\" y = np.asarray(y) z",
"y = np.pad(y, [(0, 0)] * (y.ndim - 1) +",
"+ offset[..., None, None] indices = np.minimum(indices, y.shape[-1] - 1)",
"< 0) | (indices >= len(y)), 0, z) z =",
"* N * ampl y[..., ::N + 1] += np.expand_dims(eps,",
"offset[..., None, None] indices = np.minimum(indices, y.shape[-1] - 1) indices",
"(..., N,) The peak height in the sum of the",
"None, :] + offset[..., None, None] indices = np.minimum(indices, y.shape[-1]",
"heights of a sum of signals. This assumes that the",
"Broadcasted along non-last axis. \"\"\" y, z = _yz(y, z,",
"= np.asarray(t) y = np.pad(y, [(0, 0)] * (y.ndim -",
"indices of the position of the signals. yout : scalar",
"= max(a.ndim, b.ndim) a = np.expand_dims(a, tuple(range(n - a.ndim))) b",
"color='#f55') ax.vlines(t0, 0, a0, color='gray', zorder=3) ax.vlines(t, 0, a, linestyle='--',",
"axis. \"\"\" y, a = _yz(y, a, t, yout) z",
"= np.asarray(z) t = np.asarray(t) y = np.pad(y, [(0, 0)]",
"the peak heights of a sum of signals. This assumes",
"plt from scipy import signal y = np.exp(-np.linspace(0, 10, 1000)",
"= np.array([10, 340, 523]) a0 = np.array([3, 2, 1]) indices",
"a sum of signals. This assumes that the position of",
"= np.array([3, 2, 1]) indices = i - t0[:, None]",
"t.shape[-1] indices = indices.reshape(indices.shape[:-2] + (N * N,)) n =",
">= len(y)), 0, z) z = np.sum(z, axis=0) t, =",
"along non-last axis. \"\"\" y, a = _yz(y, a, t,",
"summed. Parameters ---------- y : array (..., M,) The single",
"1000) t0 = np.array([10, 340, 523]) a0 = np.array([3, 2,",
"z def peaksampl(y, z, t, yout=0): \"\"\" Get peak amplitudes",
"= np.exp(-np.linspace(0, 10, 1000) / 10) i = np.arange(1, 1000)",
"The indices of the peaks in the sum. yout :",
"np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl = np.squeeze(ampl, -1) indices =",
"peaksampl(y, z, t, yout=0): \"\"\" Get peak amplitudes given their",
"of the signal outside the provided values, default 0. Return",
"z) z = np.sum(z, axis=0) t, = signal.argrelmax(z) assert len(t)",
"of the peaks in the sum. yout : scalar The",
"outside the provided values, default 0. Return ------ a :",
"y, z = _yz(y, z, t, yout) a = np.linalg.solve(y,",
"------ z : array (..., N,) The peak height in",
"their sum. This assumes that the position of the signals",
"ampl = np.squeeze(ampl, -1) indices = t[..., :, None] -",
"is given by peaks positions even when they are summed.",
"indices = np.minimum(indices, y.shape[-1] - 1) indices = np.maximum(indices, 0)",
"signals for each signal. Broadcasted along non-last axis. \"\"\" y,",
"sum of the signals for each signal. Broadcasted along non-last",
"[(0, 0)] * (y.ndim - 1) + [(1, 1)], constant_values=yout)",
"`a`). t : int array (..., N,) The indices of",
"This assumes that the position of the peaks is given",
"def peaksampl(y, z, t, yout=0): \"\"\" Get peak amplitudes given",
"the signal outside the provided values, default 0. Return ------",
"the position of the signals. yout : scalar The value",
"ampl y[..., ::N + 1] += np.expand_dims(eps, -1) y =",
"-1), -1) ampl = np.squeeze(ampl, -1) indices = t[..., :,",
"shape. z : array (..., N,) The peak height in",
"peak heights of a sum of signals. This assumes that",
"= np.maximum(indices, 0) N = t.shape[-1] indices = indices.reshape(indices.shape[:-2] +",
"signals is given by peaks positions even when they are",
"b): n = max(a.ndim, b.ndim) a = np.expand_dims(a, tuple(range(n -",
"signals. This assumes that the position of the peaks is",
"single signal shape. a : array (..., N,) The amplitudes",
"1000) / 10) i = np.arange(1, 1000) t0 = np.array([10,",
"peak amplitudes given their sum. This assumes that the position",
"yout=0): \"\"\" Get peak amplitudes given their sum. This assumes",
"import numpy as np def _adddims(a, b): n = max(a.ndim,",
"indices = t[..., :, None] - t[..., None, :] +",
"0) N = t.shape[-1] indices = indices.reshape(indices.shape[:-2] + (N *",
"a = peaksampl(y, z[t], t) h = sumpeaks(y, a, t)",
"array (..., N,) The indices of the peaks in the",
"the signal positions even when they are summed. Parameters ----------",
"- b.ndim))) return a, b def _yz(y, z, t, yout):",
"= np.sum(z, axis=0) t, = signal.argrelmax(z) assert len(t) == len(t0)",
"t[..., :, None] - t[..., None, :] + offset[..., None,",
"10, 1000) / 10) i = np.arange(1, 1000) t0 =",
": array (..., M,) The single signal shape. a :",
"sumpeaks(y, a, t, yout=0): \"\"\" Compute the peak heights of",
"y.shape[-1] - 1) indices = np.maximum(indices, 0) N = t.shape[-1]",
"return np.squeeze(z, axis=-1) if __name__ == '__main__': from matplotlib import",
"of signals. This assumes that the position of the peaks",
"t[..., None, :] + offset[..., None, None] indices = np.minimum(indices,",
"non-last axis. \"\"\" y, a = _yz(y, a, t, yout)",
"0)] * (y.ndim - 1) + [(1, 1)], constant_values=yout) offset",
"a, t) fig, ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0,",
"n = max(a.ndim, b.ndim) a = np.expand_dims(a, tuple(range(n - a.ndim)))",
"* N,)) n = max(y.ndim, indices.ndim) y, indices = _adddims(y,",
"the signals (`y` is multiplied by `a`). t : int",
"the signals is given by peaks positions even when they",
"z[t], t) h = sumpeaks(y, a, t) fig, ax =",
"The single signal shape. z : array (..., N,) The",
"t0[:, None] z = np.take(y, indices, mode='clip') * a0[:, None]",
"t, yout=0): \"\"\" Compute the peak heights of a sum",
"(N * N,)) n = max(y.ndim, indices.ndim) y, indices =",
"of the signals is given by peaks positions even when",
"523]) a0 = np.array([3, 2, 1]) indices = i -",
"of the signals for each signal. Broadcasted along non-last axis.",
"1) + [(1, 1)], constant_values=yout) offset = np.argmax(np.abs(y), axis=-1) ampl",
"-1) indices = t[..., :, None] - t[..., None, :]",
"'__main__': from matplotlib import pyplot as plt from scipy import",
"= np.take(y, indices, mode='clip') * a0[:, None] z = np.where((indices",
"signal. Broadcasted along non-last axis. \"\"\" y, a = _yz(y,",
"ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0, a0, color='gray',",
"non-last axis. \"\"\" y, z = _yz(y, z, t, yout)",
"indices = i - t0[:, None] z = np.take(y, indices,",
"np.maximum(indices, 0) N = t.shape[-1] indices = indices.reshape(indices.shape[:-2] + (N",
"of a sum of signals. This assumes that the position",
"peaks is given by the signal positions even when they",
"np.squeeze(ampl, -1) indices = t[..., :, None] - t[..., None,",
"if __name__ == '__main__': from matplotlib import pyplot as plt",
"a, linestyle='--', zorder=3) ax.plot(t, h, 'ok') ax.grid('major', linestyle='--') fig.tight_layout() fig.show()",
"sum_j a_j * y[t_i - t_j]. Broadcasted along non-last axis.",
"z) return np.squeeze(a, -1) def sumpeaks(y, a, t, yout=0): \"\"\"",
"as np def _adddims(a, b): n = max(a.ndim, b.ndim) a",
"0. Return ------ a : array (..., N), The amplitudes",
"y.reshape(y.shape[:-1] + (N, N)) z = z[..., None] y, z",
"constant_values=yout) offset = np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset, -1),",
": array (..., N,) The peak height in the sum",
"10) i = np.arange(1, 1000) t0 = np.array([10, 340, 523])",
":, None] - t[..., None, :] + offset[..., None, None]",
"= signal.argrelmax(z) assert len(t) == len(t0) a = peaksampl(y, z[t],",
"(..., N), The amplitudes such that z_i = sum_j a_j",
"z = np.take(y, indices, mode='clip') * a0[:, None] z =",
"amplitudes such that z_i = sum_j a_j * y[t_i -",
"(..., N,) The indices of the position of the signals.",
"provided values, default 0. Return ------ a : array (...,",
"a0[:, None] z = np.where((indices < 0) | (indices >=",
"z = np.asarray(z) t = np.asarray(t) y = np.pad(y, [(0,",
"0, z) z = np.sum(z, axis=0) t, = signal.argrelmax(z) assert",
"= np.matmul(y, a) return np.squeeze(z, axis=-1) if __name__ == '__main__':",
"\"\"\" Shared implementation of peaksampl and sumpeaks. \"\"\" y =",
"indices = np.maximum(indices, 0) N = t.shape[-1] indices = indices.reshape(indices.shape[:-2]",
"y, indices = _adddims(y, indices) y = np.take_along_axis(y, indices, -1)",
"+ 1] += np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1] + (N,",
"clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0, a0, color='gray', zorder=3) ax.vlines(t, 0,",
"signal shape. a : array (..., N,) The amplitudes of",
"M,) The single signal shape. z : array (..., N,)",
"array (..., M,) The single signal shape. a : array",
"such that z_i = sum_j a_j * y[t_i - t_j].",
"yout) z = np.matmul(y, a) return np.squeeze(z, axis=-1) if __name__",
"= max(y.ndim, indices.ndim) y, indices = _adddims(y, indices) y =",
"(N, N)) z = z[..., None] y, z = _adddims(y,",
"_yz(y, z, t, yout): \"\"\" Shared implementation of peaksampl and",
"the provided values, default 0. Return ------ a : array",
"matplotlib import pyplot as plt from scipy import signal y",
"-1) def sumpeaks(y, a, t, yout=0): \"\"\" Compute the peak",
"z[..., None] y, z = _adddims(y, z) return y, z",
"height in the sum of the signals for each peak.",
"int array (..., N,) The indices of the peaks in",
"of the signals for each peak. t : int array",
"signals for each peak. t : int array (..., N,)",
": int array (..., N,) The indices of the peaks",
"the signals for each signal. Broadcasted along non-last axis. \"\"\"",
"(indices >= len(y)), 0, z) z = np.sum(z, axis=0) t,",
"array (..., N,) The indices of the position of the",
"np.asarray(y) z = np.asarray(z) t = np.asarray(t) y = np.pad(y,",
"along non-last axis. \"\"\" y, z = _yz(y, z, t,",
"<reponame>Gattocrucco/sipmfilter import numpy as np def _adddims(a, b): n =",
"in the sum of the signals for each peak. t",
"---------- y : array (..., M,) The single signal shape.",
"z : array (..., N,) The peak height in the",
"a : array (..., N), The amplitudes such that z_i",
"np.finfo(float).eps * N * N * ampl y[..., ::N +",
"the sum of the signals for each peak. t :",
"zorder=3) ax.vlines(t, 0, a, linestyle='--', zorder=3) ax.plot(t, h, 'ok') ax.grid('major',",
"\"\"\" y = np.asarray(y) z = np.asarray(z) t = np.asarray(t)",
"1) indices = np.maximum(indices, 0) N = t.shape[-1] indices =",
"provided values, default 0. Return ------ z : array (...,",
"y : array (..., M,) The single signal shape. a",
"t) h = sumpeaks(y, a, t) fig, ax = plt.subplots(num='peaksampl',",
"mode='clip') * a0[:, None] z = np.where((indices < 0) |",
"_adddims(a, b): n = max(a.ndim, b.ndim) a = np.expand_dims(a, tuple(range(n",
"indices.reshape(indices.shape[:-2] + (N * N,)) n = max(y.ndim, indices.ndim) y,",
"from matplotlib import pyplot as plt from scipy import signal",
"1]) indices = i - t0[:, None] z = np.take(y,",
"np def _adddims(a, b): n = max(a.ndim, b.ndim) a =",
"N * ampl y[..., ::N + 1] += np.expand_dims(eps, -1)",
"= np.where((indices < 0) | (indices >= len(y)), 0, z)",
"t, yout=0): \"\"\" Get peak amplitudes given their sum. This",
"in the sum. yout : scalar The value of the",
"ax.vlines(t0, 0, a0, color='gray', zorder=3) ax.vlines(t, 0, a, linestyle='--', zorder=3)",
"z = np.where((indices < 0) | (indices >= len(y)), 0,",
"z) return y, z def peaksampl(y, z, t, yout=0): \"\"\"",
"return a, b def _yz(y, z, t, yout): \"\"\" Shared",
"M,) The single signal shape. a : array (..., N,)",
"Return ------ z : array (..., N,) The peak height",
"+= np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1] + (N, N)) z",
"Parameters ---------- y : array (..., M,) The single signal",
"\"\"\" Compute the peak heights of a sum of signals.",
"-1) ampl = np.squeeze(ampl, -1) indices = t[..., :, None]",
"+ (N, N)) z = z[..., None] y, z =",
"z = z[..., None] y, z = _adddims(y, z) return",
"The indices of the position of the signals. yout :",
"2, 1]) indices = i - t0[:, None] z =",
"plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0, a0, color='gray', zorder=3) ax.vlines(t,",
"def sumpeaks(y, a, t, yout=0): \"\"\" Compute the peak heights",
": scalar The value of the signal outside the provided",
"a_j * y[t_i - t_j]. Broadcasted along non-last axis. \"\"\"",
"a) return np.squeeze(z, axis=-1) if __name__ == '__main__': from matplotlib",
"np.asarray(z) t = np.asarray(t) y = np.pad(y, [(0, 0)] *",
"multiplied by `a`). t : int array (..., N,) The",
"1] += np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1] + (N, N))",
"= np.minimum(indices, y.shape[-1] - 1) indices = np.maximum(indices, 0) N",
"None] y, z = _adddims(y, z) return y, z def",
"signal y = np.exp(-np.linspace(0, 10, 1000) / 10) i =",
"- t0[:, None] z = np.take(y, indices, mode='clip') * a0[:,",
"== '__main__': from matplotlib import pyplot as plt from scipy",
"The amplitudes such that z_i = sum_j a_j * y[t_i",
"len(t0) a = peaksampl(y, z[t], t) h = sumpeaks(y, a,",
"y : array (..., M,) The single signal shape. z",
"a, b def _yz(y, z, t, yout): \"\"\" Shared implementation",
"* y[t_i - t_j]. Broadcasted along non-last axis. \"\"\" y,",
"np.array([3, 2, 1]) indices = i - t0[:, None] z",
"axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl = np.squeeze(ampl,",
"of the position of the signals. yout : scalar The",
"a0 = np.array([3, 2, 1]) indices = i - t0[:,",
"amplitudes of the signals (`y` is multiplied by `a`). t",
"that the position of the signals is given by peaks",
"_adddims(y, z) return y, z def peaksampl(y, z, t, yout=0):",
"z = _yz(y, z, t, yout) a = np.linalg.solve(y, z)",
"that z_i = sum_j a_j * y[t_i - t_j]. Broadcasted",
"t, yout) z = np.matmul(y, a) return np.squeeze(z, axis=-1) if",
"::N + 1] += np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1] +",
"- a.ndim))) b = np.expand_dims(b, tuple(range(n - b.ndim))) return a,",
"= np.pad(y, [(0, 0)] * (y.ndim - 1) + [(1,",
"= y.reshape(y.shape[:-1] + (N, N)) z = z[..., None] y,",
"h = sumpeaks(y, a, t) fig, ax = plt.subplots(num='peaksampl', clear=True)",
"- 1) + [(1, 1)], constant_values=yout) offset = np.argmax(np.abs(y), axis=-1)",
"a = _yz(y, a, t, yout) z = np.matmul(y, a)",
"= np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1) ampl",
"len(t) == len(t0) a = peaksampl(y, z[t], t) h =",
"assumes that the position of the peaks is given by",
"ax.vlines(t, 0, a, linestyle='--', zorder=3) ax.plot(t, h, 'ok') ax.grid('major', linestyle='--')",
"np.array([10, 340, 523]) a0 = np.array([3, 2, 1]) indices =",
"value of the signal outside the provided values, default 0.",
"the peaks is given by the signal positions even when",
"shape. a : array (..., N,) The amplitudes of the",
"t, = signal.argrelmax(z) assert len(t) == len(t0) a = peaksampl(y,",
"import signal y = np.exp(-np.linspace(0, 10, 1000) / 10) i",
"peaksampl(y, z[t], t) h = sumpeaks(y, a, t) fig, ax",
"import pyplot as plt from scipy import signal y =",
"0, a, linestyle='--', zorder=3) ax.plot(t, h, 'ok') ax.grid('major', linestyle='--') fig.tight_layout()",
"np.arange(1, 1000) t0 = np.array([10, 340, 523]) a0 = np.array([3,",
"The value of the signal outside the provided values, default",
"offset = np.argmax(np.abs(y), axis=-1) ampl = np.take_along_axis(y, np.expand_dims(offset, -1), -1)",
"the peaks in the sum. yout : scalar The value",
"The single signal shape. a : array (..., N,) The",
"N,)) n = max(y.ndim, indices.ndim) y, indices = _adddims(y, indices)",
"that the position of the peaks is given by the",
"= _yz(y, a, t, yout) z = np.matmul(y, a) return",
"peaks in the sum. yout : scalar The value of",
"y, z = _adddims(y, z) return y, z def peaksampl(y,",
"= t[..., :, None] - t[..., None, :] + offset[...,",
"N * N * ampl y[..., ::N + 1] +=",
"np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1] + (N, N)) z =",
"pyplot as plt from scipy import signal y = np.exp(-np.linspace(0,",
"0, a0, color='gray', zorder=3) ax.vlines(t, 0, a, linestyle='--', zorder=3) ax.plot(t,",
"a = np.linalg.solve(y, z) return np.squeeze(a, -1) def sumpeaks(y, a,",
"np.matmul(y, a) return np.squeeze(z, axis=-1) if __name__ == '__main__': from",
"b = np.expand_dims(b, tuple(range(n - b.ndim))) return a, b def",
"values, default 0. Return ------ z : array (..., N,)",
"= i - t0[:, None] z = np.take(y, indices, mode='clip')",
"0) | (indices >= len(y)), 0, z) z = np.sum(z,",
"340, 523]) a0 = np.array([3, 2, 1]) indices = i",
"indices, mode='clip') * a0[:, None] z = np.where((indices < 0)",
"y = np.exp(-np.linspace(0, 10, 1000) / 10) i = np.arange(1,",
":] + offset[..., None, None] indices = np.minimum(indices, y.shape[-1] -",
"given by peaks positions even when they are summed. Parameters",
"signal shape. z : array (..., N,) The peak height",
"sum of the signals for each peak. t : int",
"they are summed. Parameters ---------- y : array (..., M,)",
"in the sum of the signals for each signal. Broadcasted",
"The peak height in the sum of the signals for",
"position of the signals. yout : scalar The value of",
"(..., N,) The amplitudes of the signals (`y` is multiplied",
"(..., M,) The single signal shape. a : array (...,",
"the signals. yout : scalar The value of the signal",
"None] z = np.take(y, indices, mode='clip') * a0[:, None] z",
"z, t, yout): \"\"\" Shared implementation of peaksampl and sumpeaks.",
"np.expand_dims(b, tuple(range(n - b.ndim))) return a, b def _yz(y, z,",
"z = np.sum(z, axis=0) t, = signal.argrelmax(z) assert len(t) ==",
"= indices.reshape(indices.shape[:-2] + (N * N,)) n = max(y.ndim, indices.ndim)",
"sum. This assumes that the position of the signals is",
"i - t0[:, None] z = np.take(y, indices, mode='clip') *",
"- t_j]. Broadcasted along non-last axis. \"\"\" y, z =",
"fig, ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z, color='#f55') ax.vlines(t0, 0, a0,",
"signals. yout : scalar The value of the signal outside",
"int array (..., N,) The indices of the position of",
"= np.take_along_axis(y, indices, -1) eps = np.finfo(float).eps * N *",
"Broadcasted along non-last axis. \"\"\" y, a = _yz(y, a,",
"array (..., N,) The amplitudes of the signals (`y` is",
"a, t, yout=0): \"\"\" Compute the peak heights of a",
"y[..., ::N + 1] += np.expand_dims(eps, -1) y = y.reshape(y.shape[:-1]",
"signal positions even when they are summed. Parameters ---------- y",
"t = np.asarray(t) y = np.pad(y, [(0, 0)] * (y.ndim",
"scalar The value of the signal outside the provided values,",
"i = np.arange(1, 1000) t0 = np.array([10, 340, 523]) a0",
"= np.arange(1, 1000) t0 = np.array([10, 340, 523]) a0 =",
"axis=-1) if __name__ == '__main__': from matplotlib import pyplot as",
"the position of the signals is given by peaks positions",
"is multiplied by `a`). t : int array (..., N,)",
"def _yz(y, z, t, yout): \"\"\" Shared implementation of peaksampl",
"= np.linalg.solve(y, z) return np.squeeze(a, -1) def sumpeaks(y, a, t,",
"Return ------ a : array (..., N), The amplitudes such",
"single signal shape. z : array (..., N,) The peak",
"b.ndim))) return a, b def _yz(y, z, t, yout): \"\"\"",
": array (..., N,) The amplitudes of the signals (`y`",
"/ 10) i = np.arange(1, 1000) t0 = np.array([10, 340,",
"= sumpeaks(y, a, t) fig, ax = plt.subplots(num='peaksampl', clear=True) ax.plot(z,",
"as plt from scipy import signal y = np.exp(-np.linspace(0, 10,",
"N = t.shape[-1] indices = indices.reshape(indices.shape[:-2] + (N * N,))",
"t_j]. Broadcasted along non-last axis. \"\"\" y, z = _yz(y,",
"indices.ndim) y, indices = _adddims(y, indices) y = np.take_along_axis(y, indices,",
"return y, z def peaksampl(y, z, t, yout=0): \"\"\" Get",
"peaksampl and sumpeaks. \"\"\" y = np.asarray(y) z = np.asarray(z)",
"------ a : array (..., N), The amplitudes such that",
"default 0. Return ------ a : array (..., N), The",
"by `a`). t : int array (..., N,) The indices",
"tuple(range(n - a.ndim))) b = np.expand_dims(b, tuple(range(n - b.ndim))) return",
"max(y.ndim, indices.ndim) y, indices = _adddims(y, indices) y = np.take_along_axis(y,",
"+ (N * N,)) n = max(y.ndim, indices.ndim) y, indices",
"by peaks positions even when they are summed. Parameters ----------",
"None, None] indices = np.minimum(indices, y.shape[-1] - 1) indices =",
"tuple(range(n - b.ndim))) return a, b def _yz(y, z, t,"
] |
[
") class PMUResource(PS8Resource): name = 'pmu' claimable_mio = [ ]",
"claimable_mio = [ ] def __init__(self): super().__init__(0, 0, None, False)",
"import PS8Resource, MIOSet __all__ = ( 'PMUResource', ) class PMUResource(PS8Resource):",
"super().__init__(0, 0, None, False) def used_mio(self, **kwargs): raise NotImplementedError #",
"MIOSet __all__ = ( 'PMUResource', ) class PMUResource(PS8Resource): name =",
"None, False) def used_mio(self, **kwargs): raise NotImplementedError # :nocov: def",
"from amaranth import * from amaranth.build import * from .common",
"__init__(self): super().__init__(0, 0, None, False) def used_mio(self, **kwargs): raise NotImplementedError",
"'PMUResource', ) class PMUResource(PS8Resource): name = 'pmu' claimable_mio = [",
"amaranth.build import * from .common import PS8Resource, MIOSet __all__ =",
"raise NotImplementedError # :nocov: def generate_mapping(self, **kwargs): raise NotImplementedError #",
"= 'pmu' claimable_mio = [ ] def __init__(self): super().__init__(0, 0,",
".common import PS8Resource, MIOSet __all__ = ( 'PMUResource', ) class",
"**kwargs): raise NotImplementedError # :nocov: def generate_mapping(self, **kwargs): raise NotImplementedError",
"class PMUResource(PS8Resource): name = 'pmu' claimable_mio = [ ] def",
"__all__ = ( 'PMUResource', ) class PMUResource(PS8Resource): name = 'pmu'",
"name = 'pmu' claimable_mio = [ ] def __init__(self): super().__init__(0,",
"used_mio(self, **kwargs): raise NotImplementedError # :nocov: def generate_mapping(self, **kwargs): raise",
"PS8Resource, MIOSet __all__ = ( 'PMUResource', ) class PMUResource(PS8Resource): name",
"import * from .common import PS8Resource, MIOSet __all__ = (",
"= [ ] def __init__(self): super().__init__(0, 0, None, False) def",
"[ ] def __init__(self): super().__init__(0, 0, None, False) def used_mio(self,",
"* from amaranth.build import * from .common import PS8Resource, MIOSet",
"PMUResource(PS8Resource): name = 'pmu' claimable_mio = [ ] def __init__(self):",
"* from .common import PS8Resource, MIOSet __all__ = ( 'PMUResource',",
"= ( 'PMUResource', ) class PMUResource(PS8Resource): name = 'pmu' claimable_mio",
"NotImplementedError # :nocov: def generate_mapping(self, **kwargs): raise NotImplementedError # :nocov:",
"<reponame>shrine-maiden-heavy-industries/arachne<filename>arachne/hdl/xilinx/ps8/resources/pmu.py<gh_stars>1-10 # SPDX-License-Identifier: BSD-3-Clause from amaranth import * from amaranth.build",
"def used_mio(self, **kwargs): raise NotImplementedError # :nocov: def generate_mapping(self, **kwargs):",
"BSD-3-Clause from amaranth import * from amaranth.build import * from",
"from amaranth.build import * from .common import PS8Resource, MIOSet __all__",
"import * from amaranth.build import * from .common import PS8Resource,",
"( 'PMUResource', ) class PMUResource(PS8Resource): name = 'pmu' claimable_mio =",
"SPDX-License-Identifier: BSD-3-Clause from amaranth import * from amaranth.build import *",
"0, None, False) def used_mio(self, **kwargs): raise NotImplementedError # :nocov:",
"def __init__(self): super().__init__(0, 0, None, False) def used_mio(self, **kwargs): raise",
"False) def used_mio(self, **kwargs): raise NotImplementedError # :nocov: def generate_mapping(self,",
"'pmu' claimable_mio = [ ] def __init__(self): super().__init__(0, 0, None,",
"] def __init__(self): super().__init__(0, 0, None, False) def used_mio(self, **kwargs):",
"# SPDX-License-Identifier: BSD-3-Clause from amaranth import * from amaranth.build import",
"from .common import PS8Resource, MIOSet __all__ = ( 'PMUResource', )",
"amaranth import * from amaranth.build import * from .common import"
] |
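The file above follows the pattern arachne uses for PS8 hard-block resources with no claimable MIO pins: a subclass sets `name` and `claimable_mio`, forwards fixed arguments to `PS8Resource.__init__`, and leaves the MIO hooks unimplemented. A hedged sketch of a sibling module written to the same pattern is shown below; the `RTCResource` name and its constructor values are hypothetical, not part of arachne.

# SPDX-License-Identifier: BSD-3-Clause
# Hypothetical sibling of pmu.py, living in the same resources package.
from .common import PS8Resource, MIOSet

__all__ = (
    'RTCResource',
)

class RTCResource(PS8Resource):
    name = 'rtc'            # hypothetical peripheral name
    claimable_mio = [ ]      # no MIO pins can be claimed, as with the PMU

    def __init__(self):
        # Same fixed-argument form as PMUResource above; values are placeholders.
        super().__init__(0, 0, None, False)

    def used_mio(self, **kwargs):
        raise NotImplementedError # :nocov:

    def generate_mapping(self, **kwargs):
        raise NotImplementedError # :nocov: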
[
"setUp(self): \"\"\" Create a Washlist item that is completed the",
") self.item.save() def test_job_resets_items(self): \"\"\" Test that job to reset",
"url, {\"title\": \"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(",
"village.save() def test_add_to_template_adds_to_each_list(self): desc = \"Vask badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\")",
"= reverse(\"templatewashlist-list\") response = self.client.post( url, {\"title\": \"Tyholt\", \"village\": 1},",
"status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def test_partial_update(self): url",
"item that is completed the method also sets up a",
"Dormroom from SIFUser.mixins import AuthTestMixin from StudentVillage.models import StudentVillage from",
"= reverse(\"templatewashlist-detail\", args=[1]) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(TemplateWashList.objects.count(),",
"ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village",
"= TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc =",
"method also sets up a village and a room to",
"HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def",
"response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def test_partial_update(self): url = reverse(\"templatewashlist-detail\",",
"status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self): url = reverse(\"templatewashlist-detail\",",
"ListItem from Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase): room = None",
"status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_get_detail_template_list(self): url",
"room = None def setUp(self): village = StudentVillage.objects.create(name=\"Moholt\") self.room =",
"TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def test_partial_update(self): url = reverse(\"templatewashlist-detail\", args=[1])",
"the databases Washlist items \"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin):",
"test_get_template_list(self): url = reverse(\"templatewashlist-list\") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK)",
"temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() self.item = ListItem.objects.create(",
"args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(",
"rest_framework import status from Dormroom.models import Dormroom from SIFUser.mixins import",
"test_add_to_template_adds_to_each_list(self): desc = \"Vask badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save()",
"def test_job_resets_items(self): \"\"\" Test that job to reset Washlist items",
"from django.urls import reverse from rest_framework import status from Dormroom.models",
"TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\"",
"desc=\"Vask badet\", completed=True ) self.item.save() def test_job_resets_items(self): \"\"\" Test that",
"None def setUp(self): village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village)",
"reverse(\"templatewashlist-detail\", args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data,",
"{\"title\": \"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data,",
"TestCase from django.urls import reverse from rest_framework import status from",
"= self.client.patch( url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(",
"self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self): url = reverse(\"templatewashlist-detail\", args=[1])",
"from rest_framework import status from Dormroom.models import Dormroom from SIFUser.mixins",
"self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_get_detail_template_list(self): url =",
"the method also sets up a village and a room",
"self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self): url =",
"run manually actually rests the databases Washlist items \"\"\" reset_washlists()",
"url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)",
"\"\"\" Test that job to reset Washlist items when run",
"url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data,",
"reverse from rest_framework import status from Dormroom.models import Dormroom from",
"import TemplateListItem, TemplateWashList from Washlist.models.WashLists import ListItem from Washlist.serializer import",
"setUp(self): village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list =",
"village and a room to relate the Washlist item to",
"StudentVillage.models import StudentVillage from Washlist.jobs import reset_washlists from Washlist.models.Templates import",
"dormroom=self.room, desc=\"Vask badet\", completed=True ) def test_get_template_list(self): url = reverse(\"templatewashlist-list\")",
"= Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\",",
"the db constraints \"\"\" village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1,",
"\"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list",
"class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village =",
"response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_add_template_washlist(self): url = reverse(\"templatewashlist-list\")",
"sets up a village and a room to relate the",
") def test_add_template_washlist(self): url = reverse(\"templatewashlist-list\") response = self.client.post( url,",
"village.templateWashList = temp_list village.save() self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask",
"= self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data,",
"reverse(\"templatewashlist-list\") response = self.client.post( url, {\"title\": \"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth",
"reverse(\"templatewashlist-detail\", args=[1]) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) self.assertEqual(TemplateWashList.objects.count(), 0)",
"desc=\"Vask badet\", completed=True ) def test_get_template_list(self): url = reverse(\"templatewashlist-list\") response",
") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self): url",
"import StudentVillage from Washlist.jobs import reset_washlists from Washlist.models.Templates import TemplateListItem,",
"completed the method also sets up a village and a",
"reset Washlist items when run manually actually rests the databases",
"1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data,",
"def setUp(self): village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list",
"Washlist item to satisfy the db constraints \"\"\" village =",
"constraints \"\"\" village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list",
"= ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) def test_get_template_list(self):",
"that job to reset Washlist items when run manually actually",
"TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_add_template_washlist(self): url = reverse(\"templatewashlist-list\") response =",
"from django.test import TestCase from django.urls import reverse from rest_framework",
"StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1, village=village) self.item =",
"Washlist.models.WashLists import ListItem from Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase): room",
"\"Vask badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class",
"when run manually actually rests the databases Washlist items \"\"\"",
"a room to relate the Washlist item to satisfy the",
"templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create( pk=1,",
"badet\", completed=True ) def test_get_template_list(self): url = reverse(\"templatewashlist-list\") response =",
") def test_get_template_list(self): url = reverse(\"templatewashlist-list\") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)",
"django.test import TestCase from django.urls import reverse from rest_framework import",
"dormroom=self.room, desc=\"Vask badet\", completed=True ) self.item.save() def test_job_resets_items(self): \"\"\" Test",
"self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, )",
") def test_partial_update(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.patch(",
"class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\" Create a Washlist item that",
"TemplateWashListSerializer class WashListTemplateTest(TestCase): room = None def setUp(self): village =",
"rests the databases Washlist items \"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class",
"response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self): url = reverse(\"templatewashlist-detail\", args=[1]) response",
"temp_list village.save() self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True",
"= None def setUp(self): village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1,",
"super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list )",
"self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def test_partial_update(self):",
"\"\"\" Create a Washlist item that is completed the method",
"TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def test_partial_update(self): url = reverse(\"templatewashlist-detail\", args=[1]) response",
"village=village) self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True )",
"= TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() self.item = ListItem.objects.create( pk=1,",
"also sets up a village and a room to relate",
"up a village and a room to relate the Washlist",
"import TestCase from django.urls import reverse from rest_framework import status",
"self.room = Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list",
"self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) self.item.save()",
"Test that job to reset Washlist items when run manually",
").data, ) def test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\", args=[1]) response =",
"HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self):",
"a Washlist item that is completed the method also sets",
"from StudentVillage.models import StudentVillage from Washlist.jobs import reset_washlists from Washlist.models.Templates",
"SIFUser.mixins import AuthTestMixin from StudentVillage.models import StudentVillage from Washlist.jobs import",
"Washlist.jobs import reset_washlists from Washlist.models.Templates import TemplateListItem, TemplateWashList from Washlist.models.WashLists",
"village.templateWashList = temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc = \"Vask badet\"",
"test_job_resets_items(self): \"\"\" Test that job to reset Washlist items when",
"job to reset Washlist items when run manually actually rests",
"{\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, )",
"Washlist.models.Templates import TemplateListItem, TemplateWashList from Washlist.models.WashLists import ListItem from Washlist.serializer",
"village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() def test_add_to_template_adds_to_each_list(self):",
"is completed the method also sets up a village and",
"= temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc = \"Vask badet\" temp_list",
"= StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList",
"self.client.patch( url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data,",
"import Dormroom from SIFUser.mixins import AuthTestMixin from StudentVillage.models import StudentVillage",
"self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_get_detail_template_list(self):",
"TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\", args=[1]) response",
"badet\", completed=True ) self.item.save() def test_job_resets_items(self): \"\"\" Test that job",
"TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def test_destroy(self): url = reverse(\"templatewashlist-detail\", args=[1]) response =",
"manually actually rests the databases Washlist items \"\"\" reset_washlists() self.assertEqual(False,",
"def test_partial_update(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.patch( url,",
"from Washlist.jobs import reset_washlists from Washlist.models.Templates import TemplateListItem, TemplateWashList from",
") self.room = Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create( pk=1, dormroom=self.room,",
"= reverse(\"templatewashlist-detail\", args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(",
"url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.patch( url, {\"title\": \"Berg\"},",
"and a room to relate the Washlist item to satisfy",
"Dormroom.models import Dormroom from SIFUser.mixins import AuthTestMixin from StudentVillage.models import",
"name=\"Moholt\", templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create(",
"the Washlist item to satisfy the db constraints \"\"\" village",
"Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase): room = None def setUp(self):",
"temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc = \"Vask badet\" temp_list =",
"desc = \"Vask badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc,",
"village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() self.item =",
"self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\" Create a Washlist",
"<filename>backend/Washlist/tests.py from django.test import TestCase from django.urls import reverse from",
"to satisfy the db constraints \"\"\" village = StudentVillage.objects.create(name=\"Moholt\") self.room",
"self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\")",
"from Washlist.models.Templates import TemplateListItem, TemplateWashList from Washlist.models.WashLists import ListItem from",
"import reverse from rest_framework import status from Dormroom.models import Dormroom",
"response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\")",
"ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) self.item.save() def test_job_resets_items(self):",
"response = self.client.post( url, {\"title\": \"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth )",
"test_partial_update(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.patch( url, {\"title\":",
"Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() def",
"village = StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1, village=village)",
"Create a Washlist item that is completed the method also",
"url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK)",
"satisfy the db constraints \"\"\" village = StudentVillage.objects.create(name=\"Moholt\") self.room =",
"TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_add_template_washlist(self): url = reverse(\"templatewashlist-list\") response",
"def test_add_to_template_adds_to_each_list(self): desc = \"Vask badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc,",
"that is completed the method also sets up a village",
"Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() self.item",
"TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() self.item = ListItem.objects.create( pk=1, dormroom=self.room,",
"self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list ) self.room",
") self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def",
"self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_add_template_washlist(self):",
"washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\" Create a",
"completed=True ) self.item.save() def test_job_resets_items(self): \"\"\" Test that job to",
"StudentVillage from Washlist.jobs import reset_washlists from Washlist.models.Templates import TemplateListItem, TemplateWashList",
"= reverse(\"templatewashlist-list\") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0],",
"def test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth)",
"HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, )",
"db constraints \"\"\" village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village)",
"pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) def test_get_template_list(self): url =",
"response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\",",
"def setUp(self): \"\"\" Create a Washlist item that is completed",
"reset_washlists from Washlist.models.Templates import TemplateListItem, TemplateWashList from Washlist.models.WashLists import ListItem",
"status from Dormroom.models import Dormroom from SIFUser.mixins import AuthTestMixin from",
"import status from Dormroom.models import Dormroom from SIFUser.mixins import AuthTestMixin",
"= ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) self.item.save() def",
"pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) self.item.save() def test_job_resets_items(self): \"\"\"",
"django.urls import reverse from rest_framework import status from Dormroom.models import",
"TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1,",
"self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) def",
"reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list =",
"item to satisfy the db constraints \"\"\" village = StudentVillage.objects.create(name=\"Moholt\")",
"url = reverse(\"templatewashlist-list\") response = self.client.post( url, {\"title\": \"Tyholt\", \"village\":",
"completed=True ) def test_get_template_list(self): url = reverse(\"templatewashlist-list\") response = self.client.get(url,",
"= self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data,",
"response = self.client.patch( url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK)",
"= StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list ) self.room = Dormroom.objects.create(number=1, village=village) self.item",
"to reset Washlist items when run manually actually rests the",
"= \"Vask badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description)",
"ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\" Create a Washlist item",
"items when run manually actually rests the databases Washlist items",
"databases Washlist items \"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def",
"\"\"\" village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list =",
"reverse(\"templatewashlist-detail\", args=[1]) response = self.client.patch( url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth )",
"from Washlist.models.WashLists import ListItem from Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase):",
"TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\", args=[1])",
"Washlist items \"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self):",
"TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc = \"Vask",
"relate the Washlist item to satisfy the db constraints \"\"\"",
"Washlist item that is completed the method also sets up",
"self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_add_template_washlist(self): url =",
"reverse(\"templatewashlist-list\") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer(",
"TemplateListItem, TemplateWashList from Washlist.models.WashLists import ListItem from Washlist.serializer import TemplateWashListSerializer",
"= Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save()",
"test_destroy(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code,",
"from SIFUser.mixins import AuthTestMixin from StudentVillage.models import StudentVillage from Washlist.jobs",
"items \"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed) class WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp()",
"= self.client.post( url, {\"title\": \"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code,",
"self.client.post( url, {\"title\": \"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"TemplateWashList from Washlist.models.WashLists import ListItem from Washlist.serializer import TemplateWashListSerializer class",
"\"Tyholt\", \"village\": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer(",
"url = reverse(\"templatewashlist-list\") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(",
"\"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer(TemplateWashList.objects.get(title=\"Berg\")).data, ) def",
"self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\") ).data, ) def test_partial_update(self): url =",
"response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data[0], TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\")",
"WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\" Create a Washlist item that is",
"from Dormroom.models import Dormroom from SIFUser.mixins import AuthTestMixin from StudentVillage.models",
"status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def test_add_template_washlist(self): url",
"WashListTemplateTest(TestCase): room = None def setUp(self): village = StudentVillage.objects.create(name=\"Moholt\") self.room",
"test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code,",
"test_add_template_washlist(self): url = reverse(\"templatewashlist-list\") response = self.client.post( url, {\"title\": \"Tyholt\",",
"= TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self):",
"def setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create( name=\"Moholt\",",
"Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True",
") def test_destroy(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.delete(url,",
"TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def setUp(self): \"\"\" Create",
"= reverse(\"templatewashlist-detail\", args=[1]) response = self.client.patch( url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth",
"WashlistTemplateAPITest(AuthTestMixin): def setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create(",
"temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList = temp_list village.save() def test_add_to_template_adds_to_each_list(self): desc",
"from Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase): room = None def",
"to relate the Washlist item to satisfy the db constraints",
"badet\" temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase):",
"def test_get_template_list(self): url = reverse(\"templatewashlist-list\") response = self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code,",
"HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, ) def",
"village = StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\")",
"room to relate the Washlist item to satisfy the db",
"import ListItem from Washlist.serializer import TemplateWashListSerializer class WashListTemplateTest(TestCase): room =",
"= TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list ) self.room =",
") def test_get_detail_template_list(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.get(url,",
"def test_destroy(self): url = reverse(\"templatewashlist-detail\", args=[1]) response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth)",
").data, ) def test_partial_update(self): url = reverse(\"templatewashlist-detail\", args=[1]) response =",
"AuthTestMixin from StudentVillage.models import StudentVillage from Washlist.jobs import reset_washlists from",
"ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True ) def test_get_template_list(self): url",
"StudentVillage.objects.create(name=\"Moholt\") self.room = Dormroom.objects.create(number=1, village=village) temp_list = TemplateWashList.objects.create(title=\"Moholt\") village.templateWashList =",
"setUp(self): super().setUp() self.temp_list = TemplateWashList.objects.create(title=\"Moholt\") village = StudentVillage.objects.create( name=\"Moholt\", templateWashList=self.temp_list",
"\"village\": 1}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Tyholt\")",
"import TemplateWashListSerializer class WashListTemplateTest(TestCase): room = None def setUp(self): village",
"def test_add_template_washlist(self): url = reverse(\"templatewashlist-list\") response = self.client.post( url, {\"title\":",
"= temp_list village.save() self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\",",
"village.save() self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask badet\", completed=True )",
"temp_list = TemplateWashList.objects.get(title=\"Moholt\") TemplateListItem.objects.create(description=desc, washlist=temp_list).save() self.assertEqual(desc, ListItem.objects.get(dormroom=self.room).description) class WeeklyResetOfWashlistsTest(TestCase): def",
"import reset_washlists from Washlist.models.Templates import TemplateListItem, TemplateWashList from Washlist.models.WashLists import",
"self.client.get(url, HTTP_AUTHORIZATION=self.auth) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.data, TemplateWashListSerializer( TemplateWashList.objects.get(title=\"Moholt\") ).data, )",
").data, ) def test_add_template_washlist(self): url = reverse(\"templatewashlist-list\") response = self.client.post(",
"Washlist items when run manually actually rests the databases Washlist",
"args=[1]) response = self.client.patch( url, {\"title\": \"Berg\"}, HTTP_AUTHORIZATION=self.auth ) self.assertEqual(response.status_code,",
"a village and a room to relate the Washlist item",
"import AuthTestMixin from StudentVillage.models import StudentVillage from Washlist.jobs import reset_washlists",
"class WashListTemplateTest(TestCase): room = None def setUp(self): village = StudentVillage.objects.create(name=\"Moholt\")",
"self.item.save() def test_job_resets_items(self): \"\"\" Test that job to reset Washlist",
"self.room = Dormroom.objects.create(number=1, village=village) self.item = ListItem.objects.create( pk=1, dormroom=self.room, desc=\"Vask",
"actually rests the databases Washlist items \"\"\" reset_washlists() self.assertEqual(False, ListItem.objects.get(pk=1).completed)"
] |
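The reset-job test above checks a single item by primary key. A hedged sketch of how the same idea could be broadened to assert that the job clears every completed item follows; this `WeeklyResetClearsEverythingTest` is illustrative only, not part of the original suite, and assumes the same `reset_washlists` job and `ListItem` model.

from django.test import TestCase

from Washlist.jobs import reset_washlists
from Washlist.models.WashLists import ListItem


class WeeklyResetClearsEverythingTest(TestCase):
    def test_job_resets_all_items(self):
        # Illustrative only: relies on fixtures equivalent to the setUp above
        # (a village, a dorm room, and at least one completed ListItem).
        reset_washlists()
        self.assertFalse(ListItem.objects.filter(completed=True).exists())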
[
"if weights is not None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model =",
"**kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] = None,",
"\"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int,",
"\"acc@5\": 92.566, }, ) default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1",
"\"acc@5\": 87.402, }, ) default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def",
"\"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\": 92.566, }, ) default =",
"progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] =",
"from torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig",
"InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224),",
"Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\":",
"= MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress))",
"MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs) return",
"model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { \"task\": \"image_classification\", \"architecture\": \"MobileNetV3\",",
"import WeightsEnum, Weights from ._meta import _IMAGENET_CATEGORIES from ._utils import",
"\"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\": 91.340, }, )",
"_mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1))",
"import InterpolationMode from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api",
"**_COMMON_META, \"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\": 87.402, },",
"inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any,",
"._utils import handle_legacy_interface, _ovewrite_named_param __all__ = [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\",",
"model _COMMON_META = { \"task\": \"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\": 2019,",
"**kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def",
"mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True,",
"\"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\": 87.402, }, )",
"-> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs)",
"weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = {",
"Any, Optional, List from torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional import",
"}, ) ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={",
"= MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel,",
"2019, \"size\": (224, 224), \"min_size\": (1, 1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\":",
"._api import WeightsEnum, Weights from ._meta import _IMAGENET_CATEGORIES from ._utils",
"MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\":",
"\"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\": 87.402, }, ) default = ImageNet1K_V1",
"ImageNetEval from torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf,",
"-> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs)",
"Optional, List from torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional import InterpolationMode",
"partial from typing import Any, Optional, List from torchvision.prototype.transforms import",
"url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\":",
"**_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\": 91.340, },",
") -> MobileNetV3: if weights is not None: _ovewrite_named_param(kwargs, \"num_classes\",",
"ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] = None,",
"= Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, \"num_params\": 5483032,",
"67.668, \"acc@5\": 87.402, }, ) default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1))",
"return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small(",
"None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { \"task\": \"image_classification\", \"architecture\":",
"transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668,",
"= None, progress: bool = True, **kwargs: Any ) ->",
"\"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights(",
"MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return",
"\"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig],",
"weights: Optional[WeightsEnum], progress: bool, **kwargs: Any, ) -> MobileNetV3: if",
"*, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs:",
"\"num_classes\", len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights is",
"\"interpolation\": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval,",
"(224, 224), \"min_size\": (1, 1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, }",
"import ImageNetEval from torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3 import MobileNetV3,",
"MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool",
") default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\",",
"None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if",
"default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights]",
"Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any )",
"import _IMAGENET_CATEGORIES from ._utils import handle_legacy_interface, _ovewrite_named_param __all__ = [",
"Any, ) -> MobileNetV3: if weights is not None: _ovewrite_named_param(kwargs,",
"last_channel, **kwargs) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model",
"(1, 1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1",
"Any ) -> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel =",
"MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool",
"handle_legacy_interface, _ovewrite_named_param __all__ = [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\",",
"= True, **kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights)",
"progress: bool = True, **kwargs: Any ) -> MobileNetV3: weights",
"resize_size=232), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\":",
"**kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel",
"List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any, )",
"from typing import Any, Optional, List from torchvision.prototype.transforms import ImageNetEval",
"torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3 import",
"from ._api import WeightsEnum, Weights from ._meta import _IMAGENET_CATEGORIES from",
"\"mobilenet_v3_small\", ] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum],",
"progress: bool, **kwargs: Any, ) -> MobileNetV3: if weights is",
"-> MobileNetV3: if weights is not None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"]))",
"model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights is not None:",
"\"acc@1\": 75.274, \"acc@5\": 92.566, }, ) default = ImageNet1K_V2 class",
"class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META,",
"}, ) default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *,",
"if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META =",
"InvertedResidualConfig from ._api import WeightsEnum, Weights from ._meta import _IMAGENET_CATEGORIES",
") default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights:",
"ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={",
"inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress,",
"**kwargs) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META",
"**_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\": 92.566, },",
"__all__ = [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def",
"= Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\":",
"_mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *,",
"from ._utils import handle_legacy_interface, _ovewrite_named_param __all__ = [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\",",
"\"acc@1\": 74.042, \"acc@5\": 91.340, }, ) ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\",",
"bool = True, **kwargs: Any ) -> MobileNetV3: weights =",
"MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\":",
"_COMMON_META = { \"task\": \"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\": 2019, \"size\":",
"not None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)",
"weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting,",
"_ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights",
"crop_size=224), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\":",
"weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any",
"75.274, \"acc@5\": 92.566, }, ) default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum):",
"transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\":",
"not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { \"task\": \"image_classification\",",
") -> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\",",
"from ._meta import _IMAGENET_CATEGORIES from ._utils import handle_legacy_interface, _ovewrite_named_param __all__",
"2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\": 87.402, }, ) default",
"weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs) return _mobilenet_v3(inverted_residual_setting,",
"{ \"task\": \"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\": 2019, \"size\": (224, 224),",
"\"publication_year\": 2019, \"size\": (224, 224), \"min_size\": (1, 1), \"categories\": _IMAGENET_CATEGORIES,",
"inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress,",
"5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\": 92.566, }, ) default",
"torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from",
"crop_size=224, resize_size=232), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274,",
"Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any )",
"92.566, }, ) default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 =",
"**kwargs: Any, ) -> MobileNetV3: if weights is not None:",
"\"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\": 91.340, }, ) ImageNet1K_V2 = Weights(",
"\"architecture\": \"MobileNetV3\", \"publication_year\": 2019, \"size\": (224, 224), \"min_size\": (1, 1),",
"\"min_size\": (1, 1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum):",
"meta={ **_COMMON_META, \"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\": 87.402,",
"Weights from ._meta import _IMAGENET_CATEGORIES from ._utils import handle_legacy_interface, _ovewrite_named_param",
"return model _COMMON_META = { \"task\": \"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\":",
"MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights,",
"= [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def _mobilenet_v3(",
"1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 =",
"_mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum], progress: bool, **kwargs:",
"ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, \"num_params\":",
"MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api import WeightsEnum, Weights from ._meta",
"74.042, \"acc@5\": 91.340, }, ) ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval,",
"91.340, }, ) ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232),",
"def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool =",
"int, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any, ) -> MobileNetV3:",
"\"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\": 92.566, }, ) default = ImageNet1K_V2",
"\"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\": 91.340, }, ) ImageNet1K_V2 =",
"} class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={",
"= MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel,",
"weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights]",
"last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)",
"\"acc@5\": 91.340, }, ) ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224,",
"last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights:",
"meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\": 92.566,",
"\"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\", \"acc@1\": 75.274, \"acc@5\": 92.566, }, )",
"url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\":",
"}, ) default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights(",
"\"acc@1\": 67.668, \"acc@5\": 87.402, }, ) default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\",",
"...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api import WeightsEnum, Weights",
"True, **kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting,",
"[ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def _mobilenet_v3( inverted_residual_setting:",
"weights is not None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting,",
"functools import partial from typing import Any, Optional, List from",
"87.402, }, ) default = ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large(",
"True, **kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting,",
"WeightsEnum, Weights from ._meta import _IMAGENET_CATEGORIES from ._utils import handle_legacy_interface,",
"None, progress: bool = True, **kwargs: Any ) -> MobileNetV3:",
"**kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel",
"Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\",",
"last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)",
"\"task\": \"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\": 2019, \"size\": (224, 224), \"min_size\":",
"\"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights:",
"Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\",",
"Optional[WeightsEnum], progress: bool, **kwargs: Any, ) -> MobileNetV3: if weights",
"len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights is not",
"] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum], progress:",
"ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 5483032,",
"meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\": 91.340,",
"= ImageNet1K_V1 @handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] =",
"is not None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model = MobileNetV3(inverted_residual_setting, last_channel,",
"def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel: int, weights: Optional[WeightsEnum], progress: bool,",
"\"MobileNetV3\", \"publication_year\": 2019, \"size\": (224, 224), \"min_size\": (1, 1), \"categories\":",
"_mobilenet_v3_conf, InvertedResidualConfig from ._api import WeightsEnum, Weights from ._meta import",
"@handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Large_Weights.ImageNet1K_V1)) def mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress:",
"= True, **kwargs: Any ) -> MobileNetV3: weights = MobileNet_V3_Small_Weights.verify(weights)",
"from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api import WeightsEnum,",
"weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any",
"= Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 2542856, \"recipe\":",
"= ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224),",
"\"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ] def _mobilenet_v3( inverted_residual_setting: List[InvertedResidualConfig], last_channel:",
"MobileNetV3(inverted_residual_setting, last_channel, **kwargs) if weights is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return",
"List from torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional import InterpolationMode from",
"224), \"min_size\": (1, 1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, } class",
"ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 2542856,",
"bool, **kwargs: Any, ) -> MobileNetV3: if weights is not",
"last_channel: int, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any, ) ->",
"_IMAGENET_CATEGORIES from ._utils import handle_legacy_interface, _ovewrite_named_param __all__ = [ \"MobileNetV3\",",
"_IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR, } class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\",",
"*, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs:",
"Any ) -> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel =",
"from functools import partial from typing import Any, Optional, List",
"typing import Any, Optional, List from torchvision.prototype.transforms import ImageNetEval from",
"mobilenet_v3_large( *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True,",
"\"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\": 2019, \"size\": (224, 224), \"min_size\": (1,",
"import handle_legacy_interface, _ovewrite_named_param __all__ = [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\",",
"def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool =",
"import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api import WeightsEnum, Weights from",
"MobileNetV3: if weights is not None: _ovewrite_named_param(kwargs, \"num_classes\", len(weights.meta[\"categories\"])) model",
"class MobileNet_V3_Large_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth\", transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META,",
"transforms=partial(ImageNetEval, crop_size=224), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042,",
"crop_size=224), meta={ **_COMMON_META, \"num_params\": 2542856, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\":",
"InterpolationMode from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig from ._api import",
"default = ImageNet1K_V2 class MobileNet_V3_Small_Weights(WeightsEnum): ImageNet1K_V1 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth\", transforms=partial(ImageNetEval,",
"._meta import _IMAGENET_CATEGORIES from ._utils import handle_legacy_interface, _ovewrite_named_param __all__ =",
"from torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional import InterpolationMode from ...models.mobilenetv3",
") -> MobileNetV3: weights = MobileNet_V3_Large_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_large\",",
"5483032, \"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 74.042, \"acc@5\": 91.340, }, ) ImageNet1K_V2",
"= _mobilenet_v3_conf(\"mobilenet_v3_large\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs) @handle_legacy_interface(weights=(\"pretrained\",",
"_ovewrite_named_param __all__ = [ \"MobileNetV3\", \"MobileNet_V3_Large_Weights\", \"MobileNet_V3_Small_Weights\", \"mobilenet_v3_large\", \"mobilenet_v3_small\", ]",
"\"recipe\": \"https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small\", \"acc@1\": 67.668, \"acc@5\": 87.402, }, ) default =",
"MobileNet_V3_Small_Weights.verify(weights) inverted_residual_setting, last_channel = _mobilenet_v3_conf(\"mobilenet_v3_small\", **kwargs) return _mobilenet_v3(inverted_residual_setting, last_channel, weights,",
"= { \"task\": \"image_classification\", \"architecture\": \"MobileNetV3\", \"publication_year\": 2019, \"size\": (224,",
"@handle_legacy_interface(weights=(\"pretrained\", MobileNet_V3_Small_Weights.ImageNet1K_V1)) def mobilenet_v3_small( *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress:",
"is not None: model.load_state_dict(weights.get_state_dict(progress=progress)) return model _COMMON_META = { \"task\":",
") ImageNet1K_V2 = Weights( url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META,",
"import Any, Optional, List from torchvision.prototype.transforms import ImageNetEval from torchvision.transforms.functional",
"import partial from typing import Any, Optional, List from torchvision.prototype.transforms",
"url=\"https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth\", transforms=partial(ImageNetEval, crop_size=224, resize_size=232), meta={ **_COMMON_META, \"num_params\": 5483032, \"recipe\": \"https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning\",",
"\"size\": (224, 224), \"min_size\": (1, 1), \"categories\": _IMAGENET_CATEGORIES, \"interpolation\": InterpolationMode.BILINEAR,"
] |
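The fragment list above appears to consist of overlapping word-windows taken from a single Python module: torchvision's prototype MobileNetV3 weights definitions. As a reading aid, below is a best-effort reconstruction pieced together from those fragments. It is a sketch, not a verbatim copy: it relies on torchvision-internal imports (._api, ._meta, ._utils, ...models.mobilenetv3), so it only runs inside that package, and minor formatting differs from the original file.

from functools import partial
from typing import Any, List, Optional

from torchvision.prototype.transforms import ImageNetEval
from torchvision.transforms.functional import InterpolationMode
from ...models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf, InvertedResidualConfig
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param

__all__ = ["MobileNetV3", "MobileNet_V3_Large_Weights", "MobileNet_V3_Small_Weights",
           "mobilenet_v3_large", "mobilenet_v3_small"]


def _mobilenet_v3(inverted_residual_setting: List[InvertedResidualConfig], last_channel: int,
                  weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> MobileNetV3:
    # Overwrite num_classes from the weight metadata, build the model and, when
    # weights are given, download and load the matching state dict.
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
    model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress))
    return model


_COMMON_META = {
    "task": "image_classification", "architecture": "MobileNetV3", "publication_year": 2019,
    "size": (224, 224), "min_size": (1, 1), "categories": _IMAGENET_CATEGORIES,
    "interpolation": InterpolationMode.BILINEAR,
}


class MobileNet_V3_Large_Weights(WeightsEnum):
    ImageNet1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
        transforms=partial(ImageNetEval, crop_size=224),
        meta={**_COMMON_META, "num_params": 5483032,
              "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
              "acc@1": 74.042, "acc@5": 91.340},
    )
    ImageNet1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
        transforms=partial(ImageNetEval, crop_size=224, resize_size=232),
        meta={**_COMMON_META, "num_params": 5483032,
              "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
              "acc@1": 75.274, "acc@5": 92.566},
    )
    default = ImageNet1K_V2


class MobileNet_V3_Small_Weights(WeightsEnum):
    ImageNet1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
        transforms=partial(ImageNetEval, crop_size=224),
        meta={**_COMMON_META, "num_params": 2542856,
              "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
              "acc@1": 67.668, "acc@5": 87.402},
    )
    default = ImageNet1K_V1


@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.ImageNet1K_V1))
def mobilenet_v3_large(*, weights: Optional[MobileNet_V3_Large_Weights] = None,
                       progress: bool = True, **kwargs: Any) -> MobileNetV3:
    weights = MobileNet_V3_Large_Weights.verify(weights)
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.ImageNet1K_V1))
def mobilenet_v3_small(*, weights: Optional[MobileNet_V3_Small_Weights] = None,
                       progress: bool = True, **kwargs: Any) -> MobileNetV3:
    weights = MobileNet_V3_Small_Weights.verify(weights)
    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)
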
[
"this view to handle it in # your API client",
"This url is used by django-allauth and empty TemplateView is",
"django-allauth and empty TemplateView is # defined just to allow",
"email # with verification link is being sent, then it's",
"to allow reverse() call inside app, for example when email",
"use ConfirmEmailView # view from: # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py re_path(r'^account-confirm-email/(?P<key>[-:\\w]+)/$', TemplateView.as_view(),",
"django.views.generic import TemplateView from .views import RegisterView, VerifyEmailView urlpatterns =",
"email # content. # account_confirm_email - You should override this",
"allow reverse() call inside app, for example when email #",
"with proper key. # If you don't want to use",
"proper key. # If you don't want to use API",
"# view from: # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py re_path(r'^account-confirm-email/(?P<key>[-:\\w]+)/$', TemplateView.as_view(), name='account_confirm_email'), ]",
"that step, then just use ConfirmEmailView # view from: #",
"used by django-allauth and empty TemplateView is # defined just",
"for example when email # with verification link is being",
"VerifyEmailView.as_view(), name='rest_verify_email'), # This url is used by django-allauth and",
"verification link is being sent, then it's required to render",
"# with verification link is being sent, then it's required",
"reverse() call inside app, for example when email # with",
"url is used by django-allauth and empty TemplateView is #",
"You should override this view to handle it in #",
"link is being sent, then it's required to render email",
"# with proper key. # If you don't want to",
"TemplateView from .views import RegisterView, VerifyEmailView urlpatterns = [ re_path(r'^$',",
"API client somehow and then, send post to /verify-email/ endpoint",
"send post to /verify-email/ endpoint # with proper key. #",
"= [ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This",
"content. # account_confirm_email - You should override this view to",
"in # your API client somehow and then, send post",
"VerifyEmailView urlpatterns = [ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'),",
"sent, then it's required to render email # content. #",
"TemplateView is # defined just to allow reverse() call inside",
"being sent, then it's required to render email # content.",
"re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This url is",
"# your API client somehow and then, send post to",
"app, for example when email # with verification link is",
"# This url is used by django-allauth and empty TemplateView",
"django.urls import re_path from django.views.generic import TemplateView from .views import",
"# If you don't want to use API on that",
"don't want to use API on that step, then just",
"example when email # with verification link is being sent,",
"then, send post to /verify-email/ endpoint # with proper key.",
"re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This url is used by django-allauth",
"from .views import RegisterView, VerifyEmailView urlpatterns = [ re_path(r'^$', RegisterView.as_view(),",
"with verification link is being sent, then it's required to",
"and then, send post to /verify-email/ endpoint # with proper",
"use API on that step, then just use ConfirmEmailView #",
"just use ConfirmEmailView # view from: # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py re_path(r'^account-confirm-email/(?P<key>[-:\\w]+)/$',",
"import TemplateView from .views import RegisterView, VerifyEmailView urlpatterns = [",
"empty TemplateView is # defined just to allow reverse() call",
"account_confirm_email - You should override this view to handle it",
"from django.urls import re_path from django.views.generic import TemplateView from .views",
"it's required to render email # content. # account_confirm_email -",
"to handle it in # your API client somehow and",
"call inside app, for example when email # with verification",
"from django.views.generic import TemplateView from .views import RegisterView, VerifyEmailView urlpatterns",
"re_path from django.views.generic import TemplateView from .views import RegisterView, VerifyEmailView",
"[ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This url",
"RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This url is used",
"import re_path from django.views.generic import TemplateView from .views import RegisterView,",
"then just use ConfirmEmailView # view from: # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py",
"defined just to allow reverse() call inside app, for example",
"/verify-email/ endpoint # with proper key. # If you don't",
"import RegisterView, VerifyEmailView urlpatterns = [ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$',",
"your API client somehow and then, send post to /verify-email/",
"and empty TemplateView is # defined just to allow reverse()",
"to use API on that step, then just use ConfirmEmailView",
"is # defined just to allow reverse() call inside app,",
"then it's required to render email # content. # account_confirm_email",
"endpoint # with proper key. # If you don't want",
"# account_confirm_email - You should override this view to handle",
"want to use API on that step, then just use",
"# defined just to allow reverse() call inside app, for",
"render email # content. # account_confirm_email - You should override",
"post to /verify-email/ endpoint # with proper key. # If",
"should override this view to handle it in # your",
"If you don't want to use API on that step,",
"it in # your API client somehow and then, send",
"# content. # account_confirm_email - You should override this view",
"view to handle it in # your API client somehow",
"override this view to handle it in # your API",
"when email # with verification link is being sent, then",
"is used by django-allauth and empty TemplateView is # defined",
"by django-allauth and empty TemplateView is # defined just to",
"just to allow reverse() call inside app, for example when",
"API on that step, then just use ConfirmEmailView # view",
"- You should override this view to handle it in",
"name='rest_verify_email'), # This url is used by django-allauth and empty",
"inside app, for example when email # with verification link",
"somehow and then, send post to /verify-email/ endpoint # with",
"RegisterView, VerifyEmailView urlpatterns = [ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(),",
"on that step, then just use ConfirmEmailView # view from:",
"step, then just use ConfirmEmailView # view from: # django-allauth",
"required to render email # content. # account_confirm_email - You",
"you don't want to use API on that step, then",
"handle it in # your API client somehow and then,",
"to /verify-email/ endpoint # with proper key. # If you",
"ConfirmEmailView # view from: # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py re_path(r'^account-confirm-email/(?P<key>[-:\\w]+)/$', TemplateView.as_view(), name='account_confirm_email'),",
"to render email # content. # account_confirm_email - You should",
".views import RegisterView, VerifyEmailView urlpatterns = [ re_path(r'^$', RegisterView.as_view(), name='rest_register'),",
"urlpatterns = [ re_path(r'^$', RegisterView.as_view(), name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), #",
"name='rest_register'), re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'), # This url is used by",
"client somehow and then, send post to /verify-email/ endpoint #",
"key. # If you don't want to use API on",
"is being sent, then it's required to render email #"
] |
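The fragments above appear to be windows over a dj-rest-auth-style registration urls.py. A reconstruction assembled from them follows; RegisterView and VerifyEmailView are imported from the package's own .views module, so this sketch only runs inside such a Django app.

from django.urls import re_path
from django.views.generic import TemplateView

from .views import RegisterView, VerifyEmailView

urlpatterns = [
    re_path(r'^$', RegisterView.as_view(), name='rest_register'),
    re_path(r'^verify-email/$', VerifyEmailView.as_view(), name='rest_verify_email'),

    # This url is used by django-allauth and empty TemplateView is
    # defined just to allow reverse() call inside app, for example when email
    # with verification link is being sent, then it's required to render email
    # content.
    # account_confirm_email - You should override this view to handle it in
    # your API client somehow and then, send post to /verify-email/ endpoint
    # with proper key.
    # If you don't want to use API on that step, then just use ConfirmEmailView
    # view from:
    # django-allauth https://github.com/pennersr/django-allauth/blob/master/allauth/account/views.py
    re_path(r'^account-confirm-email/(?P<key>[-:\w]+)/$', TemplateView.as_view(),
            name='account_confirm_email'),
]
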
[
"f = open('cvpr2019.bib', 'w') print(soup.title) bibtexs = soup.find_all(\"div\", attrs={\"class\": \"bibref\"})",
"def test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url) soup = parse_html(f.read()) f.close()",
"self.event is None: self.event = Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url))",
"import TestCase # Create your tests here. from crawler.download import",
"self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first() if",
"here. from crawler.download import * from crawler.models import * class",
"= [\"CVPR 2019\", \"Computer Vision Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root",
"2019\", \"Computer Vision Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\"",
"self.root = \"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event is None:",
"self.event = Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url)",
"#print(get_html(self.url)) f = open(self.url) soup = parse_html(f.read()) f.close() f =",
"from django.test import TestCase # Create your tests here. from",
"Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url) soup =",
"soup = parse_html(f.read()) f.close() f = open('cvpr2019.bib', 'w') print(soup.title) bibtexs",
"f.close() f = open('cvpr2019.bib', 'w') print(soup.title) bibtexs = soup.find_all(\"div\", attrs={\"class\":",
"= open('cvpr2019.bib', 'w') print(soup.title) bibtexs = soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs)",
"open('cvpr2019.bib', 'w') print(soup.title) bibtexs = soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs) for",
"# Create your tests here. from crawler.download import * from",
"self.stopWords = [\"CVPR 2019\", \"Computer Vision Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\"",
"= \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event",
"django.test import TestCase # Create your tests here. from crawler.download",
"crawler.models import * class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords = [\"CVPR",
"open(self.url) soup = parse_html(f.read()) f.close() f = open('cvpr2019.bib', 'w') print(soup.title)",
"is None: self.event = Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f",
"self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url) soup = parse_html(f.read())",
"crawler.download import * from crawler.models import * class AnimalDownloadTestCase(TestCase): def",
"your tests here. from crawler.download import * from crawler.models import",
"parse_html(f.read()) f.close() f = open('cvpr2019.bib', 'w') print(soup.title) bibtexs = soup.find_all(\"div\",",
"* from crawler.models import * class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords",
"\"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event is None: self.event =",
"= open(self.url) soup = parse_html(f.read()) f.close() f = open('cvpr2019.bib', 'w')",
"setUp(self): self.stopWords = [\"CVPR 2019\", \"Computer Vision Foundation.\"] self.url =",
"if self.event is None: self.event = Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self):",
"Vision Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\" self.event =",
"import * from crawler.models import * class AnimalDownloadTestCase(TestCase): def setUp(self):",
"= soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs) for bib in bibtexs: print(bib.text)",
"\"bibref\"}) #print(bibtexs) for bib in bibtexs: print(bib.text) f.write(bib.text.replace('<br>', '\\n')) f.close()",
"None: self.event = Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f =",
"print(soup.title) bibtexs = soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs) for bib in",
"[\"CVPR 2019\", \"Computer Vision Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root =",
"soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs) for bib in bibtexs: print(bib.text) f.write(bib.text.replace('<br>',",
"AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords = [\"CVPR 2019\", \"Computer Vision Foundation.\"]",
"from crawler.models import * class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords =",
"\"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event is",
"self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event is None: self.event = Event(shortname='CVPR2019')",
"'w') print(soup.title) bibtexs = soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs) for bib",
"import * class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords = [\"CVPR 2019\",",
"tests here. from crawler.download import * from crawler.models import *",
"attrs={\"class\": \"bibref\"}) #print(bibtexs) for bib in bibtexs: print(bib.text) f.write(bib.text.replace('<br>', '\\n'))",
"TestCase # Create your tests here. from crawler.download import *",
"= \"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first() if self.event is None: self.event",
"Event.objects.filter(shortname='CVPR2019').first() if self.event is None: self.event = Event(shortname='CVPR2019') self.event.save() def",
"= Event.objects.filter(shortname='CVPR2019').first() if self.event is None: self.event = Event(shortname='CVPR2019') self.event.save()",
"= parse_html(f.read()) f.close() f = open('cvpr2019.bib', 'w') print(soup.title) bibtexs =",
"\"Computer Vision Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\" self.event",
"Foundation.\"] self.url = \"/Users/tuannguyenanh/Desktop/cvpr2019.html\"#\"http://openaccess.thecvf.com/CVPR2019.py\" self.root = \"http://openaccess.thecvf.com/\" self.event = Event.objects.filter(shortname='CVPR2019').first()",
"def setUp(self): self.stopWords = [\"CVPR 2019\", \"Computer Vision Foundation.\"] self.url",
"Create your tests here. from crawler.download import * from crawler.models",
"= Event(shortname='CVPR2019') self.event.save() def test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url) soup",
"test_animal_can_download(self): #print(get_html(self.url)) f = open(self.url) soup = parse_html(f.read()) f.close() f",
"* class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords = [\"CVPR 2019\", \"Computer",
"f = open(self.url) soup = parse_html(f.read()) f.close() f = open('cvpr2019.bib',",
"class AnimalDownloadTestCase(TestCase): def setUp(self): self.stopWords = [\"CVPR 2019\", \"Computer Vision",
"from crawler.download import * from crawler.models import * class AnimalDownloadTestCase(TestCase):",
"bibtexs = soup.find_all(\"div\", attrs={\"class\": \"bibref\"}) #print(bibtexs) for bib in bibtexs:"
] |
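The fragments above appear to cover a small Django test module that scrapes BibTeX entries from a saved CVPR 2019 page. A reconstruction is sketched below; get_html, parse_html and Event are assumed to come from the project's crawler.download and crawler.models modules (star-imported in the original), and the hard-coded local path is kept as it appears in the fragments.

from django.test import TestCase

# Create your tests here.
from crawler.download import *
from crawler.models import *


class AnimalDownloadTestCase(TestCase):
    def setUp(self):
        self.stopWords = ["CVPR 2019", "Computer Vision Foundation."]
        self.url = "/Users/tuannguyenanh/Desktop/cvpr2019.html"  # "http://openaccess.thecvf.com/CVPR2019.py"
        self.root = "http://openaccess.thecvf.com/"
        self.event = Event.objects.filter(shortname='CVPR2019').first()
        if self.event is None:
            self.event = Event(shortname='CVPR2019')
            self.event.save()

    def test_animal_can_download(self):
        # print(get_html(self.url))
        f = open(self.url)
        soup = parse_html(f.read())
        f.close()
        f = open('cvpr2019.bib', 'w')
        print(soup.title)
        bibtexs = soup.find_all("div", attrs={"class": "bibref"})
        # print(bibtexs)
        for bib in bibtexs:
            print(bib.text)
            f.write(bib.text.replace('<br>', '\n'))
        f.close()
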
[
"from xml.etree.ElementTree import parse # Download the RSS feed and",
"it u = urlopen('http://planet.python.org/rss20.xml') doc = parse(u) # Extract and",
"doc.iterfind('channel/item'): title = item.findtext('title') date = item.findtext('pubDate') link = item.findtext('link')",
"feed and parse it u = urlopen('http://planet.python.org/rss20.xml') doc = parse(u)",
"item.findtext('pubDate') link = item.findtext('link') print(title) print(date) print(link) print() print(\"Program executed.\")",
"from urllib.request import urlopen from xml.etree.ElementTree import parse # Download",
"parse(u) # Extract and output tags of interest for item",
"import parse # Download the RSS feed and parse it",
"# Extract and output tags of interest for item in",
"urllib.request import urlopen from xml.etree.ElementTree import parse # Download the",
"parse # Download the RSS feed and parse it u",
"date = item.findtext('pubDate') link = item.findtext('link') print(title) print(date) print(link) print()",
"and output tags of interest for item in doc.iterfind('channel/item'): title",
"= urlopen('http://planet.python.org/rss20.xml') doc = parse(u) # Extract and output tags",
"= parse(u) # Extract and output tags of interest for",
"of interest for item in doc.iterfind('channel/item'): title = item.findtext('title') date",
"doc = parse(u) # Extract and output tags of interest",
"urlopen('http://planet.python.org/rss20.xml') doc = parse(u) # Extract and output tags of",
"# Download the RSS feed and parse it u =",
"<gh_stars>1-10 #!/usr/bin/python3 from urllib.request import urlopen from xml.etree.ElementTree import parse",
"item in doc.iterfind('channel/item'): title = item.findtext('title') date = item.findtext('pubDate') link",
"in doc.iterfind('channel/item'): title = item.findtext('title') date = item.findtext('pubDate') link =",
"the RSS feed and parse it u = urlopen('http://planet.python.org/rss20.xml') doc",
"item.findtext('title') date = item.findtext('pubDate') link = item.findtext('link') print(title) print(date) print(link)",
"import urlopen from xml.etree.ElementTree import parse # Download the RSS",
"#!/usr/bin/python3 from urllib.request import urlopen from xml.etree.ElementTree import parse #",
"Extract and output tags of interest for item in doc.iterfind('channel/item'):",
"= item.findtext('pubDate') link = item.findtext('link') print(title) print(date) print(link) print() print(\"Program",
"for item in doc.iterfind('channel/item'): title = item.findtext('title') date = item.findtext('pubDate')",
"parse it u = urlopen('http://planet.python.org/rss20.xml') doc = parse(u) # Extract",
"= item.findtext('title') date = item.findtext('pubDate') link = item.findtext('link') print(title) print(date)",
"xml.etree.ElementTree import parse # Download the RSS feed and parse",
"Download the RSS feed and parse it u = urlopen('http://planet.python.org/rss20.xml')",
"tags of interest for item in doc.iterfind('channel/item'): title = item.findtext('title')",
"urlopen from xml.etree.ElementTree import parse # Download the RSS feed",
"output tags of interest for item in doc.iterfind('channel/item'): title =",
"and parse it u = urlopen('http://planet.python.org/rss20.xml') doc = parse(u) #",
"RSS feed and parse it u = urlopen('http://planet.python.org/rss20.xml') doc =",
"interest for item in doc.iterfind('channel/item'): title = item.findtext('title') date =",
"u = urlopen('http://planet.python.org/rss20.xml') doc = parse(u) # Extract and output",
"title = item.findtext('title') date = item.findtext('pubDate') link = item.findtext('link') print(title)"
] |
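The fragments above reassemble into a short, self-contained standard-library script that fetches the Planet Python RSS feed and prints each item's title, date and link. The reconstruction below should run as-is given network access; the leading <gh_stars> token in the fragments looks like a dataset marker and is omitted here.

#!/usr/bin/python3
from urllib.request import urlopen
from xml.etree.ElementTree import parse

# Download the RSS feed and parse it
u = urlopen('http://planet.python.org/rss20.xml')
doc = parse(u)

# Extract and output tags of interest
for item in doc.iterfind('channel/item'):
    title = item.findtext('title')
    date = item.findtext('pubDate')
    link = item.findtext('link')
    print(title)
    print(date)
    print(link)
    print()

print("Program executed.")
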
[
"name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads, name='leads'), path('table/', views.table, name='table'),",
"Add a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" from django.contrib",
"2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" from",
"name='table'), path('plotly/', views.plotly, name='plotly'), # url(r'^keys', views.upload, name='keys'), # path('key-gen/',",
"import include, path 2. Add a URL to urlpatterns: path('blog/',",
"path('', Home.as_view(), name='home') Including another URLconf 1. Import the include()",
"2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including",
"include() function: from django.urls import include, path 2. Add a",
"views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run, name='run'),",
"routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/",
"Add an import: from my_app import views 2. Add a",
"URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1.",
"Add a URL to urlpatterns: path('', views.home, name='home') Class-based views",
"to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import",
"url(r'^keys', views.upload, name='keys'), # path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact),",
"1. Add an import: from other_app.views import Home 2. Add",
"views.table, name='table'), path('plotly/', views.plotly, name='plotly'), # url(r'^keys', views.upload, name='keys'), #",
"an import: from other_app.views import Home 2. Add a URL",
"views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views",
"views.plotly, name='plotly'), # url(r'^keys', views.upload, name='keys'), # path('key-gen/', views.key_gen, name='key-gen'),",
"\"\"\" from django.contrib import admin from django.urls import path from",
"Home.as_view(), name='home') Including another URLconf 1. Import the include() function:",
"name='home') Class-based views 1. Add an import: from other_app.views import",
"function: from django.urls import include, path 2. Add a URL",
"import views 2. Add a URL to urlpatterns: path('', views.home,",
"Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')",
"The `urlpatterns` list routes URLs to views. For more information",
"Add an import: from other_app.views import Home 2. Add a",
"import Home 2. Add a URL to urlpatterns: path('', Home.as_view(),",
"path from contacts import views admin.autodiscover() urlpatterns = [ path('',",
"views 1. Add an import: from other_app.views import Home 2.",
"= [ path('', views.index, name='contact_index'), path('', views.index, name='lead_index'), path('contacts/', views.contacts,",
"generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run, name='run'), # path('sorted/<id>', views.merge, name='merge'),",
"include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))",
"admin from django.urls import path from contacts import views admin.autodiscover()",
"path('run/', views.run, name='run'), # path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>', views.download,",
"a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" from django.contrib import",
"name='run'), # path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>', views.download, name='export'), #",
"from django.contrib import admin from django.urls import path from contacts",
"other_app.views import Home 2. Add a URL to urlpatterns: path('',",
"views.contacts, name='contacts'), path('leads/', views.leads, name='leads'), path('table/', views.table, name='table'), path('plotly/', views.plotly,",
"views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads, name='leads'), path('table/', views.table,",
"list routes URLs to views. For more information please see:",
"Class-based views 1. Add an import: from other_app.views import Home",
"Examples: Function views 1. Add an import: from my_app import",
"# path('run/', views.run, name='run'), # path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>',",
"the include() function: from django.urls import include, path 2. Add",
"path('table/', views.table, name='table'), path('plotly/', views.plotly, name='plotly'), # url(r'^keys', views.upload, name='keys'),",
"path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\"",
"see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from",
"name='heroku'), # path('run/', views.run, name='run'), # path('sorted/<id>', views.merge, name='merge'), #",
"URLs to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples:",
"to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an",
"views.leads, name='leads'), path('table/', views.table, name='table'), path('plotly/', views.plotly, name='plotly'), # url(r'^keys',",
"import: from other_app.views import Home 2. Add a URL to",
"1. Add an import: from my_app import views 2. Add",
"urlpatterns: path('blog/', include('blog.urls')) \"\"\" from django.contrib import admin from django.urls",
"urlpatterns = [ path('', views.index, name='contact_index'), path('', views.index, name='lead_index'), path('contacts/',",
"views.upload, name='keys'), # path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'),",
"from my_app import views 2. Add a URL to urlpatterns:",
"admin.autodiscover() urlpatterns = [ path('', views.index, name='contact_index'), path('', views.index, name='lead_index'),",
"to urlpatterns: path('blog/', include('blog.urls')) \"\"\" from django.contrib import admin from",
"name='contacts'), path('leads/', views.leads, name='leads'), path('table/', views.table, name='table'), path('plotly/', views.plotly, name='plotly'),",
"# path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/',",
"path('', views.index, name='contact_index'), path('', views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/',",
"import: from my_app import views 2. Add a URL to",
"name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run, name='run'), #",
"Import the include() function: from django.urls import include, path 2.",
"path('plotly/', views.plotly, name='plotly'), # url(r'^keys', views.upload, name='keys'), # path('key-gen/', views.key_gen,",
"URL Configuration The `urlpatterns` list routes URLs to views. For",
"For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1.",
"1. Import the include() function: from django.urls import include, path",
"\"\"\"dedupper_app URL Configuration The `urlpatterns` list routes URLs to views.",
"path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>', views.download, name='export'), # path('sorted/report/<type>', views.download_times,",
"[ path('', views.index, name='contact_index'), path('', views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'),",
"name='leads'), path('table/', views.table, name='table'), path('plotly/', views.plotly, name='plotly'), # url(r'^keys', views.upload,",
"another URLconf 1. Import the include() function: from django.urls import",
"views 2. Add a URL to urlpatterns: path('', views.home, name='home')",
"path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run,",
"views.merge, name='merge'), # path('sorted/export/<type>', views.download, name='export'), # path('sorted/report/<type>', views.download_times, name='report'),",
"from django.urls import include, path 2. Add a URL to",
"import admin from django.urls import path from contacts import views",
"my_app import views 2. Add a URL to urlpatterns: path('',",
"information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an",
"django.urls import path from contacts import views admin.autodiscover() urlpatterns =",
"urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import:",
"a URL to urlpatterns: path('', views.home, name='home') Class-based views 1.",
"# url(r'^keys', views.upload, name='keys'), # path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/',",
"django.urls import include, path 2. Add a URL to urlpatterns:",
"name='keys'), # path('key-gen/', views.key_gen, name='key-gen'), # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), #",
"URL to urlpatterns: path('blog/', include('blog.urls')) \"\"\" from django.contrib import admin",
"name='home') Including another URLconf 1. Import the include() function: from",
"from other_app.views import Home 2. Add a URL to urlpatterns:",
"views.run, name='run'), # path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>', views.download, name='export'),",
"path('', views.home, name='home') Class-based views 1. Add an import: from",
"path('blog/', include('blog.urls')) \"\"\" from django.contrib import admin from django.urls import",
"# path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run, name='run'), # path('sorted/<id>',",
"more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add",
"path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'), # path('run/', views.run, name='run'), # path('sorted/<id>', views.merge,",
"URLconf 1. Import the include() function: from django.urls import include,",
"# path('sorted/<id>', views.merge, name='merge'), # path('sorted/export/<type>', views.download, name='export'), # path('sorted/report/<type>',",
"Configuration The `urlpatterns` list routes URLs to views. For more",
"views.index, name='contact_index'), path('', views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads,",
"import views admin.autodiscover() urlpatterns = [ path('', views.index, name='contact_index'), path('',",
"URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add",
"import path from contacts import views admin.autodiscover() urlpatterns = [",
"from contacts import views admin.autodiscover() urlpatterns = [ path('', views.index,",
"views 1. Add an import: from my_app import views 2.",
"views admin.autodiscover() urlpatterns = [ path('', views.index, name='contact_index'), path('', views.index,",
"include('blog.urls')) \"\"\" from django.contrib import admin from django.urls import path",
"Including another URLconf 1. Import the include() function: from django.urls",
"Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another",
"path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads, name='leads'), path('table/', views.table, name='table'), path('plotly/',",
"2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based",
"Function views 1. Add an import: from my_app import views",
"https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app",
"please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function views 1. Add an import:",
"name='plotly'), # url(r'^keys', views.upload, name='keys'), # path('key-gen/', views.key_gen, name='key-gen'), #",
"`urlpatterns` list routes URLs to views. For more information please",
"to views. For more information please see: https://docs.djangoproject.com/en/2.0/topics/http/urls/ Examples: Function",
"path('leads/', views.leads, name='leads'), path('table/', views.table, name='table'), path('plotly/', views.plotly, name='plotly'), #",
"contacts import views admin.autodiscover() urlpatterns = [ path('', views.index, name='contact_index'),",
"an import: from my_app import views 2. Add a URL",
"name='merge'), # path('sorted/export/<type>', views.download, name='export'), # path('sorted/report/<type>', views.download_times, name='report'), ]",
"from django.urls import path from contacts import views admin.autodiscover() urlpatterns",
"views.home, name='home') Class-based views 1. Add an import: from other_app.views",
"a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf",
"name='contact_index'), path('', views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads, name='leads'),",
"urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the",
"django.contrib import admin from django.urls import path from contacts import",
"path('', views.index, name='lead_index'), path('contacts/', views.contacts, name='contacts'), path('leads/', views.leads, name='leads'), path('table/',"
] |
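The fragments above appear to be windows over a Django 2.0 project urls.py for a contacts/dedupper app. A reconstruction is sketched below; it assumes a contacts.views module providing the referenced view functions, and the commented-out routes are kept as comments exactly as the fragments show them.

"""dedupper_app URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from contacts import views

admin.autodiscover()

urlpatterns = [
    path('', views.index, name='contact_index'),
    path('', views.index, name='lead_index'),
    path('contacts/', views.contacts, name='contacts'),
    path('leads/', views.leads, name='leads'),
    path('table/', views.table, name='table'),
    path('plotly/', views.plotly, name='plotly'),
    # url(r'^keys', views.upload, name='keys'),
    # path('key-gen/', views.key_gen, name='key-gen'),
    # path('heroku/', generic.ListView.as_view(model=models.Contact), name='heroku'),
    # path('run/', views.run, name='run'),
    # path('sorted/<id>', views.merge, name='merge'),
    # path('sorted/export/<type>', views.download, name='export'),
    # path('sorted/report/<type>', views.download_times, name='report'),
]
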
[
"<gh_stars>0 from . import qtlib QT_LIB = qtlib.QT_LIB if QT_LIB",
"import qtlib QT_LIB = qtlib.QT_LIB if QT_LIB == 'PyQt5': from",
"qtlib QT_LIB = qtlib.QT_LIB if QT_LIB == 'PyQt5': from PyQt5.uic",
"QT_LIB = qtlib.QT_LIB if QT_LIB == 'PyQt5': from PyQt5.uic import",
"from . import qtlib QT_LIB = qtlib.QT_LIB if QT_LIB ==",
"= qtlib.QT_LIB if QT_LIB == 'PyQt5': from PyQt5.uic import *",
". import qtlib QT_LIB = qtlib.QT_LIB if QT_LIB == 'PyQt5':"
] |
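This short fragment list reassembles into the opening of a Qt-binding shim module. Only the lines below are recoverable from the visible windows (the leading <gh_stars> token again looks like a dataset marker); the original module presumably continues with branches for other Qt bindings. The relative import means this sketch only makes sense inside its package.

from . import qtlib

QT_LIB = qtlib.QT_LIB

if QT_LIB == 'PyQt5':
    from PyQt5.uic import *
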
[
"app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end",
"during a request.') return \"\"\" An internal error occurred: <pre>{}</pre>",
"return '<html><a href=\"ingest\">ingest last week</a> earthquake data</html>' @app.route('/ingest') def ingest_last_week():",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"logs for full stacktrace. \"\"\".format(e), 500 if __name__ == '__main__':",
"os import logging import transform import flask import google.cloud.storage as",
"# # Licensed under the Apache License, Version 2.0 (the",
"compliance with the License. # You may obtain a copy",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"2.0 (the \"License\"); # you may not use this file",
"agreed to in writing, software # distributed under the License",
"file except in compliance with the License. # You may",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"Unless required by applicable law or agreed to in writing,",
"# [START app] import os import logging import transform import",
"try invoking it from <a href=\"{}\"> the GCP console /",
"See logs for full stacktrace. \"\"\".format(e), 500 if __name__ ==",
"variable via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)",
"change permissions blob.make_public() status = 'uploaded {} to {}'.format(outfile, blob.name)",
"to {}'.format(outfile, blob.name) logging.info(status) except KeyError as e: status =",
"distributed under the License is distributed on an \"AS IS\"",
"level=logging.INFO) # [end config] @app.route('/') def welcome(): return '<html><a href=\"ingest\">ingest",
"KeyError for {} -- try invoking it from <a href=\"{}\">",
"# Copyright 2016 Google Inc. # # Licensed under the",
"href=\"ingest\">ingest last week</a> earthquake data</html>' @app.route('/ingest') def ingest_last_week(): try: #",
"# verify that this is a cron job request is_cron",
"-- try invoking it from <a href=\"{}\"> the GCP console",
"/ taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return status",
"the specific language governing permissions and # limitations under the",
"= '<html>Sorry, this capability is accessible only by the Cron",
"config] @app.route('/') def welcome(): return '<html><a href=\"ingest\">ingest last week</a> earthquake",
"= gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile)",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"error occurred: <pre>{}</pre> See logs for full stacktrace. \"\"\".format(e), 500",
"outfile) # upload to cloud storage client = gcs.Client() bucket",
"express or implied. # See the License for the specific",
"applicable law or agreed to in writing, software # distributed",
"{} -- try invoking it from <a href=\"{}\"> the GCP",
"except in compliance with the License. # You may obtain",
"@app.route('/') def welcome(): return '<html><a href=\"ingest\">ingest last week</a> earthquake data</html>'",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status = 'scheduled",
"a KeyError for {} -- try invoking it from <a",
"it from <a href=\"{}\"> the GCP console / AppEngine /",
"2016 Google Inc. # # Licensed under the Apache License,",
"stacktrace. \"\"\".format(e), 500 if __name__ == '__main__': app.run(host='0.0.0.0', port=8080, debug=True)",
"import os import logging import transform import flask import google.cloud.storage",
"Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end config] @app.route('/') def welcome(): return",
"app] import os import logging import transform import flask import",
"transform import flask import google.cloud.storage as gcs # [start config]",
"not use this file except in compliance with the License.",
"only by the Cron service, but I got a KeyError",
"I got a KeyError for {} -- try invoking it",
"got a KeyError for {} -- try invoking it from",
"GCP console / AppEngine / taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected",
"request') return status @app.errorhandler(500) def server_error(e): logging.exception('An error occurred during",
"flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron)) # create png url =",
"500 if __name__ == '__main__': app.run(host='0.0.0.0', port=8080, debug=True) # [END",
"= 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status = 'scheduled ingest of",
"the License. # [START app] import os import logging import",
"= gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change permissions blob.make_public() status =",
"under the License. # [START app] import os import logging",
"@app.route('/ingest') def ingest_last_week(): try: # verify that this is a",
"writing, software # distributed under the License is distributed on",
"[end config] @app.route('/') def welcome(): return '<html><a href=\"ingest\">ingest last week</a>",
"status = 'uploaded {} to {}'.format(outfile, blob.name) logging.info(status) except KeyError",
"in writing, software # distributed under the License is distributed",
"server_error(e): logging.exception('An error occurred during a request.') return \"\"\" An",
"flask.Flask(__name__) # Configure this environment variable via app.yaml CLOUD_STORAGE_BUCKET =",
"you may not use this file except in compliance with",
"= 'uploaded {} to {}'.format(outfile, blob.name) logging.info(status) except KeyError as",
"logging.info(status) except KeyError as e: status = '<html>Sorry, this capability",
"# Licensed under the Apache License, Version 2.0 (the \"License\");",
"e: status = '<html>Sorry, this capability is accessible only by",
"import flask import google.cloud.storage as gcs # [start config] app",
"'<html>Sorry, this capability is accessible only by the Cron service,",
"request.') return \"\"\" An internal error occurred: <pre>{}</pre> See logs",
"occurred: <pre>{}</pre> See logs for full stacktrace. \"\"\".format(e), 500 if",
"to {}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile) # upload to cloud",
"status @app.errorhandler(500) def server_error(e): logging.exception('An error occurred during a request.')",
"earthquake data</html>' @app.route('/ingest') def ingest_last_week(): try: # verify that this",
"use this file except in compliance with the License. #",
"return status @app.errorhandler(500) def server_error(e): logging.exception('An error occurred during a",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"verify that this is a cron job request is_cron =",
"License. # [START app] import os import logging import transform",
"flask import google.cloud.storage as gcs # [start config] app =",
"#!/usr/bin/env python # Copyright 2016 Google Inc. # # Licensed",
"CONDITIONS OF ANY KIND, either express or implied. # See",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"[START app] import os import logging import transform import flask",
"def server_error(e): logging.exception('An error occurred during a request.') return \"\"\"",
"create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status =",
"or implied. # See the License for the specific language",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"the Cron service, but I got a KeyError for {}",
"<pre>{}</pre> See logs for full stacktrace. \"\"\".format(e), 500 if __name__",
"License. # You may obtain a copy of the License",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"License, Version 2.0 (the \"License\"); # you may not use",
"the GCP console / AppEngine / taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON')",
"logging.exception('An error occurred during a request.') return \"\"\" An internal",
"# You may obtain a copy of the License at",
"KIND, either express or implied. # See the License for",
"specific language governing permissions and # limitations under the License.",
"client = gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket)",
"An internal error occurred: <pre>{}</pre> See logs for full stacktrace.",
"'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return status @app.errorhandler(500) def server_error(e): logging.exception('An",
"is a cron job request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron",
"as e: status = '<html>Sorry, this capability is accessible only",
"under the License is distributed on an \"AS IS\" BASIS,",
"'earthquakes.png' status = 'scheduled ingest of {} to {}'.format(url, outfile)",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change permissions blob.make_public() status = 'uploaded",
"License for the specific language governing permissions and # limitations",
"outfile = 'earthquakes.png' status = 'scheduled ingest of {} to",
"governing permissions and # limitations under the License. # [START",
"capability is accessible only by the Cron service, but I",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"console / AppEngine / taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron",
"to cloud storage client = gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob",
"a cron job request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request",
"full stacktrace. \"\"\".format(e), 500 if __name__ == '__main__': app.run(host='0.0.0.0', port=8080,",
"'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status = 'scheduled ingest of {}",
"app = flask.Flask(__name__) # Configure this environment variable via app.yaml",
"Google Inc. # # Licensed under the Apache License, Version",
"is accessible only by the Cron service, but I got",
"= 'earthquakes.png' status = 'scheduled ingest of {} to {}'.format(url,",
"ingest_last_week(): try: # verify that this is a cron job",
"invoking it from <a href=\"{}\"> the GCP console / AppEngine",
"the License for the specific language governing permissions and #",
"'scheduled ingest of {} to {}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile)",
"if __name__ == '__main__': app.run(host='0.0.0.0', port=8080, debug=True) # [END app]",
"cron request {}'.format(is_cron)) # create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile",
"(the \"License\"); # you may not use this file except",
"Apache License, Version 2.0 (the \"License\"); # you may not",
"week</a> earthquake data</html>' @app.route('/ingest') def ingest_last_week(): try: # verify that",
"# you may not use this file except in compliance",
"either express or implied. # See the License for the",
"bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change",
"# logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end config] @app.route('/') def welcome():",
"OR CONDITIONS OF ANY KIND, either express or implied. #",
"def ingest_last_week(): try: # verify that this is a cron",
"# limitations under the License. # [START app] import os",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"config] app = flask.Flask(__name__) # Configure this environment variable via",
"os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end config] @app.route('/') def",
"KeyError as e: status = '<html>Sorry, this capability is accessible",
"the License is distributed on an \"AS IS\" BASIS, #",
"that this is a cron job request is_cron = flask.request.headers['X-Appengine-Cron']",
"data</html>' @app.route('/ingest') def ingest_last_week(): try: # verify that this is",
"in compliance with the License. # You may obtain a",
"software # distributed under the License is distributed on an",
"and # limitations under the License. # [START app] import",
"logging.info(status) transform.create_png(url, outfile) # upload to cloud storage client =",
"{}'.format(is_cron)) # create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png'",
"# Configure this environment variable via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET']",
"e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return status @app.errorhandler(500) def server_error(e):",
"non-Cron request') return status @app.errorhandler(500) def server_error(e): logging.exception('An error occurred",
"internal error occurred: <pre>{}</pre> See logs for full stacktrace. \"\"\".format(e),",
"{} to {}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile) # upload to",
"transform.create_png(url, outfile) # upload to cloud storage client = gcs.Client()",
"logging.info('Rejected non-Cron request') return status @app.errorhandler(500) def server_error(e): logging.exception('An error",
"# # Unless required by applicable law or agreed to",
"by the Cron service, but I got a KeyError for",
"{}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile) # upload to cloud storage",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"as gcs # [start config] app = flask.Flask(__name__) # Configure",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"permissions and # limitations under the License. # [START app]",
"error occurred during a request.') return \"\"\" An internal error",
"blob.make_public() status = 'uploaded {} to {}'.format(outfile, blob.name) logging.info(status) except",
"Version 2.0 (the \"License\"); # you may not use this",
"upload to cloud storage client = gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET)",
"'<html><a href=\"ingest\">ingest last week</a> earthquake data</html>' @app.route('/ingest') def ingest_last_week(): try:",
"logging import transform import flask import google.cloud.storage as gcs #",
"[start config] app = flask.Flask(__name__) # Configure this environment variable",
"this environment variable via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s:",
"request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron)) # create",
"but I got a KeyError for {} -- try invoking",
"storage client = gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png',",
"law or agreed to in writing, software # distributed under",
"gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) #",
"\"\"\".format(e), 500 if __name__ == '__main__': app.run(host='0.0.0.0', port=8080, debug=True) #",
"= client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change permissions",
"<a href=\"{}\"> the GCP console / AppEngine / taskqueues </a></html>'.format(",
"try: # verify that this is a cron job request",
"for {} -- try invoking it from <a href=\"{}\"> the",
"via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) #",
"# [end config] @app.route('/') def welcome(): return '<html><a href=\"ingest\">ingest last",
"outfile) logging.info(status) transform.create_png(url, outfile) # upload to cloud storage client",
"cloud storage client = gcs.Client() bucket = client.get_bucket(CLOUD_STORAGE_BUCKET) blob =",
"implied. # See the License for the specific language governing",
"job request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron)) #",
"gcs # [start config] app = flask.Flask(__name__) # Configure this",
"# [start config] app = flask.Flask(__name__) # Configure this environment",
"environment variable via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s',",
"this is a cron job request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received",
"blob.upload_from_filename(outfile) # change permissions blob.make_public() status = 'uploaded {} to",
"under the Apache License, Version 2.0 (the \"License\"); # you",
"import transform import flask import google.cloud.storage as gcs # [start",
"# change permissions blob.make_public() status = 'uploaded {} to {}'.format(outfile,",
"\"License\"); # you may not use this file except in",
"permissions blob.make_public() status = 'uploaded {} to {}'.format(outfile, blob.name) logging.info(status)",
"'uploaded {} to {}'.format(outfile, blob.name) logging.info(status) except KeyError as e:",
"from <a href=\"{}\"> the GCP console / AppEngine / taskqueues",
"occurred during a request.') return \"\"\" An internal error occurred:",
"for full stacktrace. \"\"\".format(e), 500 if __name__ == '__main__': app.run(host='0.0.0.0',",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"except KeyError as e: status = '<html>Sorry, this capability is",
"import logging import transform import flask import google.cloud.storage as gcs",
"blob.name) logging.info(status) except KeyError as e: status = '<html>Sorry, this",
"of {} to {}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile) # upload",
"= os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end config] @app.route('/')",
"import google.cloud.storage as gcs # [start config] app = flask.Flask(__name__)",
"# upload to cloud storage client = gcs.Client() bucket =",
"by applicable law or agreed to in writing, software #",
"# distributed under the License is distributed on an \"AS",
"OF ANY KIND, either express or implied. # See the",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"client.get_bucket(CLOUD_STORAGE_BUCKET) blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change permissions blob.make_public()",
"blob = gcs.Blob('earthquakes/earthquakes.png', bucket) blob.upload_from_filename(outfile) # change permissions blob.make_public() status",
"may obtain a copy of the License at # #",
"# Unless required by applicable law or agreed to in",
"ANY KIND, either express or implied. # See the License",
"See the License for the specific language governing permissions and",
"%(message)s', level=logging.INFO) # [end config] @app.route('/') def welcome(): return '<html><a",
"cron job request is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron))",
"the License. # You may obtain a copy of the",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"for the specific language governing permissions and # limitations under",
"= flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron)) # create png url",
"Configure this environment variable via app.yaml CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] #",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"to in writing, software # distributed under the License is",
"accessible only by the Cron service, but I got a",
"return \"\"\" An internal error occurred: <pre>{}</pre> See logs for",
"def welcome(): return '<html><a href=\"ingest\">ingest last week</a> earthquake data</html>' @app.route('/ingest')",
"</a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return status @app.errorhandler(500) def",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"Inc. # # Licensed under the Apache License, Version 2.0",
"# See the License for the specific language governing permissions",
"ingest of {} to {}'.format(url, outfile) logging.info(status) transform.create_png(url, outfile) #",
"{} to {}'.format(outfile, blob.name) logging.info(status) except KeyError as e: status",
"service, but I got a KeyError for {} -- try",
"href=\"{}\"> the GCP console / AppEngine / taskqueues </a></html>'.format( e,",
"welcome(): return '<html><a href=\"ingest\">ingest last week</a> earthquake data</html>' @app.route('/ingest') def",
"You may obtain a copy of the License at #",
"google.cloud.storage as gcs # [start config] app = flask.Flask(__name__) #",
"language governing permissions and # limitations under the License. #",
"Cron service, but I got a KeyError for {} --",
"may not use this file except in compliance with the",
"or agreed to in writing, software # distributed under the",
"CLOUD_STORAGE_BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] # logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) # [end config]",
"required by applicable law or agreed to in writing, software",
"taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return status @app.errorhandler(500)",
"{}'.format(outfile, blob.name) logging.info(status) except KeyError as e: status = '<html>Sorry,",
"python # Copyright 2016 Google Inc. # # Licensed under",
"is_cron = flask.request.headers['X-Appengine-Cron'] logging.info('Received cron request {}'.format(is_cron)) # create png",
"a request.') return \"\"\" An internal error occurred: <pre>{}</pre> See",
"= 'scheduled ingest of {} to {}'.format(url, outfile) logging.info(status) transform.create_png(url,",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"Copyright 2016 Google Inc. # # Licensed under the Apache",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"status = '<html>Sorry, this capability is accessible only by the",
"@app.errorhandler(500) def server_error(e): logging.exception('An error occurred during a request.') return",
"with the License. # You may obtain a copy of",
"this file except in compliance with the License. # You",
"url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status = 'scheduled ingest",
"the Apache License, Version 2.0 (the \"License\"); # you may",
"last week</a> earthquake data</html>' @app.route('/ingest') def ingest_last_week(): try: # verify",
"# create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile = 'earthquakes.png' status",
"status = 'scheduled ingest of {} to {}'.format(url, outfile) logging.info(status)",
"= flask.Flask(__name__) # Configure this environment variable via app.yaml CLOUD_STORAGE_BUCKET",
"bucket) blob.upload_from_filename(outfile) # change permissions blob.make_public() status = 'uploaded {}",
"logging.info('Received cron request {}'.format(is_cron)) # create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv'",
"this capability is accessible only by the Cron service, but",
"request {}'.format(is_cron)) # create png url = 'http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_week.csv' outfile =",
"AppEngine / taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request') return",
"\"\"\" An internal error occurred: <pre>{}</pre> See logs for full",
"limitations under the License. # [START app] import os import",
"/ AppEngine / taskqueues </a></html>'.format( e, 'http://console.cloud.google.com/appengine/taskqueues?tab=CRON') logging.info('Rejected non-Cron request')"
"cls.credential_list: if credential.username == user_name: users_credential_list.append(credential) return users_credential_list def delete_credential(self):",
"def find_by_sitename(cls, sitename): ''' Class method that takes a site",
"sitename def save_credential(self): ''' save_cred method saves the user objects",
"Credential.credential_list: if (credential.sitename == sitename): the_credential = sitename return the_credential",
"the user objects into creds_list ''' Credential.credential_list.append(self) @classmethod def display_credential(cls,",
"that takes a site name and returns the credential that",
"sitename): ''' Class method that copies a credentials details after",
"a password where a user can generate a password based",
"self.username = username self.password = password self.sitename = sitename def",
"in cls.credential_list: if credential.sitename == sitename: return credential @classmethod def",
"%s\" % password) return password @classmethod def find_by_sitename(cls, sitename): '''",
"Method that checks if user exists from the credential list.",
"= [] for credential in cls.credential_list: if credential.username == user_name:",
"been entered ''' find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def",
"Class method that takes a site name and returns the",
"credential list. Returns: Boolean: True or false depending if the",
"% chars) length = int(input(\"[*] Input Password Length: \")) while",
"''' save_cred method saves the user objects into creds_list '''",
"\"\" print(\"Use Char list = %s \\n\" % chars) length",
"site ''' for credential in cls.credential_list: if credential.sitename == sitename:",
"method that copies a credentials details after the credentials sitename",
"new credentials ''' credential_list = [] def __init__(self,username,sitename,password): self.username =",
"where a user can generate a password based on their",
"for credential in cls.credential_list: if credential.sitename == sitename: return credential",
"sitename): ''' Method that checks if user exists from the",
"chars) length = int(input(\"[*] Input Password Length: \")) while len(password)",
"import string class Credential: ''' class that generates new credentials",
"password where a user can generate a password based on",
"Class method to show the list of credentials saved '''",
"length: print(\"Password: %s\" % password) return password @classmethod def find_by_sitename(cls,",
"returns the credential that matches that site ''' for credential",
"generate_password(self): ''' Function to generate a password where a user",
"after the credentials sitename has been entered ''' find_credential =",
"''' Method that checks if user exists from the credential",
"def delete_credential(self): ''' delete_contact method deletes a saved credential from",
"chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\" print(\"Use Char list =",
"that generates new credentials ''' credential_list = [] def __init__(self,username,sitename,password):",
"to generate a password where a user can generate a",
"''' delete_contact method deletes a saved credential from the credential_list",
"def save_credential(self): ''' save_cred method saves the user objects into",
"for credential in cls.credential_list: if credential.username == user_name: users_credential_list.append(credential) return",
"list = %s \\n\" % chars) length = int(input(\"[*] Input",
"Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name): ''' Class method to show",
"\")) while len(password) != length: password = password + random.choice(chars)",
"credentials sitename has been entered ''' find_credential = Credential.find_by_sitename(sitename) return",
"a password based on their length of choice ''' chars",
"= username self.password = password self.sitename = sitename def save_credential(self):",
"+ random.choice(chars) if len(password) == length: print(\"Password: %s\" % password)",
"that matches that site ''' for credential in cls.credential_list: if",
"matches that site ''' for credential in cls.credential_list: if credential.sitename",
"''' for credential in cls.credential_list: if credential.sitename == sitename: return",
"= Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename): ''' Method",
"credential exits ''' the_credential = \"\" for credential in Credential.credential_list:",
"user objects into creds_list ''' Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name):",
"creds_list ''' Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name): ''' Class method",
"password = \"\" print(\"Use Char list = %s \\n\" %",
"credentials ''' credential_list = [] def __init__(self,username,sitename,password): self.username = username",
"cls.credential_list: if credential.sitename == sitename: return credential @classmethod def copy_credential(cls,",
"''' Class method that copies a credentials details after the",
"while len(password) != length: password = password + random.choice(chars) if",
"pyperclip import random import string class Credential: ''' class that",
"a saved credential from the credential_list ''' Credential.credential_list.remove(self) def generate_password(self):",
"\\n\" % chars) length = int(input(\"[*] Input Password Length: \"))",
"def copy_credential(cls, sitename): ''' Class method that copies a credentials",
"find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename): '''",
"credential in cls.credential_list: if credential.sitename == sitename: return credential @classmethod",
"password) return password @classmethod def find_by_sitename(cls, sitename): ''' Class method",
"class Credential: ''' class that generates new credentials ''' credential_list",
"% password) return password @classmethod def find_by_sitename(cls, sitename): ''' Class",
"== length: print(\"Password: %s\" % password) return password @classmethod def",
"= password + random.choice(chars) if len(password) == length: print(\"Password: %s\"",
"a site name and returns the credential that matches that",
"Char list = %s \\n\" % chars) length = int(input(\"[*]",
"= \"\" print(\"Use Char list = %s \\n\" % chars)",
"the_credential = \"\" for credential in Credential.credential_list: if (credential.sitename ==",
"self.sitename = sitename def save_credential(self): ''' save_cred method saves the",
"%s \\n\" % chars) length = int(input(\"[*] Input Password Length:",
"print(\"Password: %s\" % password) return password @classmethod def find_by_sitename(cls, sitename):",
"''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\" print(\"Use Char list",
"objects into creds_list ''' Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name): '''",
"delete_contact method deletes a saved credential from the credential_list '''",
"the credential_list ''' Credential.credential_list.remove(self) def generate_password(self): ''' Function to generate",
"[] def __init__(self,username,sitename,password): self.username = username self.password = password self.sitename",
"sitename): ''' Class method that takes a site name and",
"a credentials details after the credentials sitename has been entered",
"method that takes a site name and returns the credential",
"copies a credentials details after the credentials sitename has been",
"into creds_list ''' Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name): ''' Class",
"list of credentials saved ''' users_credential_list = [] for credential",
"users_credential_list.append(credential) return users_credential_list def delete_credential(self): ''' delete_contact method deletes a",
"self.password = password self.sitename = sitename def save_credential(self): ''' save_cred",
"credential_exist(cls, sitename): ''' Method that checks if user exists from",
"return password @classmethod def find_by_sitename(cls, sitename): ''' Class method that",
"password @classmethod def find_by_sitename(cls, sitename): ''' Class method that takes",
"entered ''' find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls,",
"length of choice ''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\"",
"''' credential_list = [] def __init__(self,username,sitename,password): self.username = username self.password",
"generate a password based on their length of choice '''",
"random import string class Credential: ''' class that generates new",
"site name and returns the credential that matches that site",
"method saves the user objects into creds_list ''' Credential.credential_list.append(self) @classmethod",
"copy_credential(cls, sitename): ''' Class method that copies a credentials details",
"!= length: password = password + random.choice(chars) if len(password) ==",
"print(\"Use Char list = %s \\n\" % chars) length =",
"if user exists from the credential list. Returns: Boolean: True",
"password based on their length of choice ''' chars =",
"''' the_credential = \"\" for credential in Credential.credential_list: if (credential.sitename",
"the credential list. Returns: Boolean: True or false depending if",
"''' find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename):",
"user can generate a password based on their length of",
"credentials details after the credentials sitename has been entered '''",
"sitename has been entered ''' find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password)",
"credential in Credential.credential_list: if (credential.sitename == sitename): the_credential = sitename",
"in cls.credential_list: if credential.username == user_name: users_credential_list.append(credential) return users_credential_list def",
"Class method that copies a credentials details after the credentials",
"find_by_sitename(cls, sitename): ''' Class method that takes a site name",
"__init__(self,username,sitename,password): self.username = username self.password = password self.sitename = sitename",
"credentials saved ''' users_credential_list = [] for credential in cls.credential_list:",
"''' users_credential_list = [] for credential in cls.credential_list: if credential.username",
"\"\" for credential in Credential.credential_list: if (credential.sitename == sitename): the_credential",
"user_name): ''' Class method to show the list of credentials",
"credential.username == user_name: users_credential_list.append(credential) return users_credential_list def delete_credential(self): ''' delete_contact",
"for credential in Credential.credential_list: if (credential.sitename == sitename): the_credential =",
"false depending if the credential exits ''' the_credential = \"\"",
"in Credential.credential_list: if (credential.sitename == sitename): the_credential = sitename return",
"return users_credential_list def delete_credential(self): ''' delete_contact method deletes a saved",
"def generate_password(self): ''' Function to generate a password where a",
"@classmethod def credential_exist(cls, sitename): ''' Method that checks if user",
"password = password + random.choice(chars) if len(password) == length: print(\"Password:",
"import pyperclip import random import string class Credential: ''' class",
"@classmethod def copy_credential(cls, sitename): ''' Class method that copies a",
"Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename): ''' Method that",
"''' class that generates new credentials ''' credential_list = []",
"to show the list of credentials saved ''' users_credential_list =",
"Credential: ''' class that generates new credentials ''' credential_list =",
"method deletes a saved credential from the credential_list ''' Credential.credential_list.remove(self)",
"Function to generate a password where a user can generate",
"username self.password = password self.sitename = sitename def save_credential(self): '''",
"def __init__(self,username,sitename,password): self.username = username self.password = password self.sitename =",
"credential.sitename == sitename: return credential @classmethod def copy_credential(cls, sitename): '''",
"credential @classmethod def copy_credential(cls, sitename): ''' Class method that copies",
"the list of credentials saved ''' users_credential_list = [] for",
"pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename): ''' Method that checks if",
"the credential that matches that site ''' for credential in",
"== sitename: return credential @classmethod def copy_credential(cls, sitename): ''' Class",
"''' Credential.credential_list.append(self) @classmethod def display_credential(cls, user_name): ''' Class method to",
"can generate a password based on their length of choice",
"takes a site name and returns the credential that matches",
"''' Function to generate a password where a user can",
"= sitename def save_credential(self): ''' save_cred method saves the user",
"random.choice(chars) if len(password) == length: print(\"Password: %s\" % password) return",
"display_credential(cls, user_name): ''' Class method to show the list of",
"the credentials sitename has been entered ''' find_credential = Credential.find_by_sitename(sitename)",
"from the credential_list ''' Credential.credential_list.remove(self) def generate_password(self): ''' Function to",
"users_credential_list = [] for credential in cls.credential_list: if credential.username ==",
"of choice ''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\" print(\"Use",
"that copies a credentials details after the credentials sitename has",
"Credential.credential_list.remove(self) def generate_password(self): ''' Function to generate a password where",
"@classmethod def find_by_sitename(cls, sitename): ''' Class method that takes a",
"user exists from the credential list. Returns: Boolean: True or",
"== user_name: users_credential_list.append(credential) return users_credential_list def delete_credential(self): ''' delete_contact method",
"exists from the credential list. Returns: Boolean: True or false",
"= %s \\n\" % chars) length = int(input(\"[*] Input Password",
"length = int(input(\"[*] Input Password Length: \")) while len(password) !=",
"Input Password Length: \")) while len(password) != length: password =",
"len(password) == length: print(\"Password: %s\" % password) return password @classmethod",
"saved ''' users_credential_list = [] for credential in cls.credential_list: if",
"sitename: return credential @classmethod def copy_credential(cls, sitename): ''' Class method",
"= password self.sitename = sitename def save_credential(self): ''' save_cred method",
"int(input(\"[*] Input Password Length: \")) while len(password) != length: password",
"name and returns the credential that matches that site '''",
"credential_list ''' Credential.credential_list.remove(self) def generate_password(self): ''' Function to generate a",
"credential_list = [] def __init__(self,username,sitename,password): self.username = username self.password =",
"or false depending if the credential exits ''' the_credential =",
"generate a password where a user can generate a password",
"password self.sitename = sitename def save_credential(self): ''' save_cred method saves",
"Password Length: \")) while len(password) != length: password = password",
"return pyperclip.copy(find_credential.password) @classmethod def credential_exist(cls, sitename): ''' Method that checks",
"def display_credential(cls, user_name): ''' Class method to show the list",
"if the credential exits ''' the_credential = \"\" for credential",
"exits ''' the_credential = \"\" for credential in Credential.credential_list: if",
"\"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\" print(\"Use Char list = %s \\n\"",
"length: password = password + random.choice(chars) if len(password) == length:",
"len(password) != length: password = password + random.choice(chars) if len(password)",
"method to show the list of credentials saved ''' users_credential_list",
"True or false depending if the credential exits ''' the_credential",
"password + random.choice(chars) if len(password) == length: print(\"Password: %s\" %",
"= [] def __init__(self,username,sitename,password): self.username = username self.password = password",
"credential from the credential_list ''' Credential.credential_list.remove(self) def generate_password(self): ''' Function",
"if credential.sitename == sitename: return credential @classmethod def copy_credential(cls, sitename):",
"if len(password) == length: print(\"Password: %s\" % password) return password",
"from the credential list. Returns: Boolean: True or false depending",
"credential that matches that site ''' for credential in cls.credential_list:",
"''' Credential.credential_list.remove(self) def generate_password(self): ''' Function to generate a password",
"saves the user objects into creds_list ''' Credential.credential_list.append(self) @classmethod def",
"users_credential_list def delete_credential(self): ''' delete_contact method deletes a saved credential",
"of credentials saved ''' users_credential_list = [] for credential in",
"''' Class method that takes a site name and returns",
"Returns: Boolean: True or false depending if the credential exits",
"and returns the credential that matches that site ''' for",
"return credential @classmethod def copy_credential(cls, sitename): ''' Class method that",
"if credential.username == user_name: users_credential_list.append(credential) return users_credential_list def delete_credential(self): '''",
"save_cred method saves the user objects into creds_list ''' Credential.credential_list.append(self)",
"saved credential from the credential_list ''' Credential.credential_list.remove(self) def generate_password(self): '''",
"import random import string class Credential: ''' class that generates",
"delete_credential(self): ''' delete_contact method deletes a saved credential from the",
"Boolean: True or false depending if the credential exits '''",
"that checks if user exists from the credential list. Returns:",
"checks if user exists from the credential list. Returns: Boolean:",
"based on their length of choice ''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\"",
"save_credential(self): ''' save_cred method saves the user objects into creds_list",
"their length of choice ''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password =",
"= int(input(\"[*] Input Password Length: \")) while len(password) != length:",
"= \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\" print(\"Use Char list = %s",
"= \"\" for credential in Credential.credential_list: if (credential.sitename == sitename):",
"choice ''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password = \"\" print(\"Use Char",
"Length: \")) while len(password) != length: password = password +",
"''' Class method to show the list of credentials saved",
"the credential exits ''' the_credential = \"\" for credential in",
"generates new credentials ''' credential_list = [] def __init__(self,username,sitename,password): self.username",
"has been entered ''' find_credential = Credential.find_by_sitename(sitename) return pyperclip.copy(find_credential.password) @classmethod",
"details after the credentials sitename has been entered ''' find_credential",
"a user can generate a password based on their length",
"string class Credential: ''' class that generates new credentials '''",
"that site ''' for credential in cls.credential_list: if credential.sitename ==",
"def credential_exist(cls, sitename): ''' Method that checks if user exists",
"depending if the credential exits ''' the_credential = \"\" for",
"user_name: users_credential_list.append(credential) return users_credential_list def delete_credential(self): ''' delete_contact method deletes",
"[] for credential in cls.credential_list: if credential.username == user_name: users_credential_list.append(credential)",
"class that generates new credentials ''' credential_list = [] def",
"show the list of credentials saved ''' users_credential_list = []",
"on their length of choice ''' chars = \"abcdefghijklmnopqrstuvwxyziABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890^?!?$%&/()=?`'+#*'~';:_,.-<>|\" password",
"@classmethod def display_credential(cls, user_name): ''' Class method to show the",
"list. Returns: Boolean: True or false depending if the credential",
"credential in cls.credential_list: if credential.username == user_name: users_credential_list.append(credential) return users_credential_list",
"deletes a saved credential from the credential_list ''' Credential.credential_list.remove(self) def"
"import unittest import app def test_test(): assert app.test() == \"Works!\"",
"<reponame>mehulsatardekar/dice-on-demand import unittest import app def test_test(): assert app.test() =="